author:    Linus Torvalds <torvalds@linux-foundation.org>  2019-09-18 21:14:31 +0300
committer: Linus Torvalds <torvalds@linux-foundation.org>  2019-09-18 21:14:31 +0300
commit:    6cfae0c26b21dce323fe8799b66cf4bc996e3565 (patch)
tree:      647f80442929de7ed17cc436c546c21c8c2b2aa9 /drivers
parent:    e6874fc29410fabfdbc8c12b467f41a16cbcfd2b (diff)
parent:    16a0f687cac70301f49d6f99c4115824e6aad42b (diff)
download:  linux-6cfae0c26b21dce323fe8799b66cf4bc996e3565.tar.xz
Merge tag 'char-misc-5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here is the big char/misc driver pull request for 5.4-rc1.

  As has been happening in previous releases, more and more individual
  driver subsystem trees are ending up in here. Now if that is good or
  bad I can't tell, but hopefully it makes your life easier as it's more
  of an aggregation of trees together to one merge point for you.

  Anyway, lots of stuff in here:

   - habanalabs driver updates
   - thunderbolt driver updates
   - misc driver updates
   - coresight and intel_th hwtracing driver updates
   - fpga driver updates
   - extcon driver updates
   - some dma driver updates
   - char driver updates
   - android binder driver updates
   - nvmem driver updates
   - phy driver updates
   - parport driver fixes
   - pcmcia driver fix
   - uio driver updates
   - w1 driver updates
   - configfs fixes
   - other assorted driver updates

  All of these have been in linux-next for a long time with no reported
  issues"

* tag 'char-misc-5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (200 commits)
  misc: mic: Use PTR_ERR_OR_ZERO rather than its implementation
  habanalabs: correctly cast variable to __le32
  habanalabs: show correct id in error print
  habanalabs: stop using the acronym KMD
  habanalabs: display card name as sensors header
  habanalabs: add uapi to retrieve aggregate H/W events
  habanalabs: add uapi to retrieve device utilization
  habanalabs: Make the Coresight timestamp perpetual
  habanalabs: explicitly set the queue-id enumerated numbers
  habanalabs: print to kernel log when reset is finished
  habanalabs: replace __le32_to_cpu with le32_to_cpu
  habanalabs: replace __cpu_to_le32/64 with cpu_to_le32/64
  habanalabs: Handle HW_IP_INFO if device disabled or in reset
  habanalabs: Expose devices after initialization is done
  habanalabs: improve security in Debug IOCTL
  habanalabs: use default structure for user input in Debug IOCTL
  habanalabs: Add descriptive name to PSOC app status register
  habanalabs: Add descriptive names to PSOC scratch-pad registers
  habanalabs: create two char devices per ASIC
  habanalabs: change device_setup_cdev() to be more generic
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/property.c | 6
-rw-r--r--  drivers/android/binder.c | 100
-rw-r--r--  drivers/android/binder_internal.h | 86
-rw-r--r--  drivers/android/binderfs.c | 290
-rw-r--r--  drivers/char/mem.c | 21
-rw-r--r--  drivers/char/ppdev.c | 2
-rw-r--r--  drivers/char/toshiba.c | 8
-rw-r--r--  drivers/clk/qcom/clk-rpmh.c | 16
-rw-r--r--  drivers/extcon/extcon-adc-jack.c | 4
-rw-r--r--  drivers/extcon/extcon-arizona.c | 2
-rw-r--r--  drivers/extcon/extcon-axp288.c | 16
-rw-r--r--  drivers/extcon/extcon-fsa9480.c | 1
-rw-r--r--  drivers/extcon/extcon-gpio.c | 29
-rw-r--r--  drivers/extcon/extcon-max77843.c | 6
-rw-r--r--  drivers/extcon/extcon-sm5502.c | 2
-rw-r--r--  drivers/firmware/Kconfig | 18
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/google/vpd.c | 4
-rw-r--r--  drivers/firmware/google/vpd_decode.c | 55
-rw-r--r--  drivers/firmware/google/vpd_decode.h | 6
-rw-r--r--  drivers/firmware/stratix10-rsu.c | 451
-rw-r--r--  drivers/firmware/stratix10-svc.c | 76
-rw-r--r--  drivers/fpga/Kconfig | 6
-rw-r--r--  drivers/fpga/Makefile | 3
-rw-r--r--  drivers/fpga/altera-cvp.c | 342
-rw-r--r--  drivers/fpga/altera-pr-ip-core-plat.c | 4
-rw-r--r--  drivers/fpga/altera-pr-ip-core.c | 4
-rw-r--r--  drivers/fpga/dfl-afu-error.c | 230
-rw-r--r--  drivers/fpga/dfl-afu-main.c | 381
-rw-r--r--  drivers/fpga/dfl-afu.h | 9
-rw-r--r--  drivers/fpga/dfl-fme-error.c | 359
-rw-r--r--  drivers/fpga/dfl-fme-main.c | 128
-rw-r--r--  drivers/fpga/dfl-fme-pr.c | 7
-rw-r--r--  drivers/fpga/dfl-fme.h | 6
-rw-r--r--  drivers/fpga/dfl-pci.c | 36
-rw-r--r--  drivers/fpga/dfl.c | 226
-rw-r--r--  drivers/fpga/dfl.h | 52
-rw-r--r--  drivers/hwtracing/coresight/coresight-cpu-debug.c | 33
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-sysfs.c | 13
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c | 38
-rw-r--r--  drivers/hwtracing/coresight/coresight-funnel.c | 11
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h | 10
-rw-r--r--  drivers/hwtracing/coresight/coresight-replicator.c | 3
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etf.c | 23
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c | 93
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c | 40
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.h | 11
-rw-r--r--  drivers/hwtracing/intel_th/Makefile | 3
-rw-r--r--  drivers/hwtracing/intel_th/msu-sink.c | 116
-rw-r--r--  drivers/hwtracing/intel_th/msu.c | 537
-rw-r--r--  drivers/hwtracing/intel_th/msu.h | 20
-rw-r--r--  drivers/interconnect/core.c | 27
-rw-r--r--  drivers/interconnect/qcom/Kconfig | 12
-rw-r--r--  drivers/interconnect/qcom/Makefile | 4
-rw-r--r--  drivers/interconnect/qcom/qcs404.c | 539
-rw-r--r--  drivers/interconnect/qcom/sdm845.c | 160
-rw-r--r--  drivers/interconnect/qcom/smd-rpm.c | 77
-rw-r--r--  drivers/interconnect/qcom/smd-rpm.h | 15
-rw-r--r--  drivers/misc/Kconfig | 9
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/cardreader/alcor_pci.c | 6
-rw-r--r--  drivers/misc/eeprom/Kconfig | 5
-rw-r--r--  drivers/misc/eeprom/ee1004.c | 6
-rw-r--r--  drivers/misc/eeprom/max6875.c | 6
-rw-r--r--  drivers/misc/fastrpc.c | 79
-rw-r--r--  drivers/misc/habanalabs/asid.c | 2
-rw-r--r--  drivers/misc/habanalabs/command_buffer.c | 3
-rw-r--r--  drivers/misc/habanalabs/command_submission.c | 27
-rw-r--r--  drivers/misc/habanalabs/context.c | 40
-rw-r--r--  drivers/misc/habanalabs/debugfs.c | 16
-rw-r--r--  drivers/misc/habanalabs/device.c | 488
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 95
-rw-r--r--  drivers/misc/habanalabs/goya/goyaP.h | 19
-rw-r--r--  drivers/misc/habanalabs/goya/goya_coresight.c | 89
-rw-r--r--  drivers/misc/habanalabs/goya/goya_hwmgr.c | 109
-rw-r--r--  drivers/misc/habanalabs/habanalabs.h | 129
-rw-r--r--  drivers/misc/habanalabs/habanalabs_drv.c | 171
-rw-r--r--  drivers/misc/habanalabs/habanalabs_ioctl.c | 180
-rw-r--r--  drivers/misc/habanalabs/hw_queue.c | 18
-rw-r--r--  drivers/misc/habanalabs/hwmon.c | 24
-rw-r--r--  drivers/misc/habanalabs/include/armcp_if.h | 85
-rw-r--r--  drivers/misc/habanalabs/include/goya/goya.h | 2
-rw-r--r--  drivers/misc/habanalabs/include/goya/goya_reg_map.h | 34
-rw-r--r--  drivers/misc/habanalabs/irq.c | 4
-rw-r--r--  drivers/misc/habanalabs/sysfs.c | 126
-rw-r--r--  drivers/misc/lkdtm/Makefile | 1
-rw-r--r--  drivers/misc/lkdtm/bugs.c | 7
-rw-r--r--  drivers/misc/lkdtm/cfi.c | 42
-rw-r--r--  drivers/misc/lkdtm/core.c | 2
-rw-r--r--  drivers/misc/lkdtm/lkdtm.h | 4
-rw-r--r--  drivers/misc/mei/pci-me.c | 19
-rw-r--r--  drivers/misc/mei/pci-txe.c | 19
-rw-r--r--  drivers/misc/mic/card/mic_x100.c | 28
-rw-r--r--  drivers/misc/mic/scif/scif_epd.h | 5
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c | 4
-rw-r--r--  drivers/misc/spear13xx_pcie_gadget.c | 797
-rw-r--r--  drivers/misc/xilinx_sdfec.c | 1214
-rw-r--r--  drivers/nvmem/imx-ocotp-scu.c | 7
-rw-r--r--  drivers/nvmem/imx-ocotp.c | 7
-rw-r--r--  drivers/nvmem/meson-mx-efuse.c | 3
-rw-r--r--  drivers/nvmem/mxs-ocotp.c | 2
-rw-r--r--  drivers/nvmem/sunxi_sid.c | 1
-rw-r--r--  drivers/parport/Makefile | 2
-rw-r--r--  drivers/parport/parport_serial.c | 6
-rw-r--r--  drivers/pcmcia/i82092.c | 6
-rw-r--r--  drivers/phy/Makefile | 2
-rw-r--r--  drivers/phy/lantiq/Kconfig | 11
-rw-r--r--  drivers/phy/lantiq/Makefile | 1
-rw-r--r--  drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c | 494
-rw-r--r--  drivers/phy/marvell/Kconfig | 1
-rw-r--r--  drivers/phy/marvell/phy-armada38x-comphy.c | 4
-rw-r--r--  drivers/phy/marvell/phy-mvebu-a3700-comphy.c | 17
-rw-r--r--  drivers/phy/marvell/phy-mvebu-cp110-comphy.c | 525
-rw-r--r--  drivers/phy/phy-core.c | 10
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp.c | 44
-rw-r--r--  drivers/phy/renesas/phy-rcar-gen3-usb2.c | 2
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-inno-hdmi.c | 2
-rw-r--r--  drivers/phy/samsung/phy-exynos-dp-video.c | 1
-rw-r--r--  drivers/phy/samsung/phy-exynos-mipi-video.c | 1
-rw-r--r--  drivers/phy/samsung/phy-exynos-pcie.c | 1
-rw-r--r--  drivers/phy/samsung/phy-exynos5-usbdrd.c | 1
-rw-r--r--  drivers/phy/samsung/phy-exynos5250-sata.c | 1
-rw-r--r--  drivers/phy/samsung/phy-samsung-usb2.c | 1
-rw-r--r--  drivers/phy/ti/phy-am654-serdes.c | 33
-rw-r--r--  drivers/slimbus/qcom-ngd-ctrl.c | 5
-rw-r--r--  drivers/slimbus/slimbus.h | 2
-rw-r--r--  drivers/thunderbolt/Makefile | 2
-rw-r--r--  drivers/thunderbolt/ctl.c | 23
-rw-r--r--  drivers/thunderbolt/eeprom.c | 6
-rw-r--r--  drivers/thunderbolt/icm.c | 194
-rw-r--r--  drivers/thunderbolt/nhi.c | 134
-rw-r--r--  drivers/thunderbolt/nhi.h | 22
-rw-r--r--  drivers/thunderbolt/nhi_ops.c | 179
-rw-r--r--  drivers/thunderbolt/nhi_regs.h | 37
-rw-r--r--  drivers/thunderbolt/switch.c | 52
-rw-r--r--  drivers/thunderbolt/tb_msgs.h | 16
-rw-r--r--  drivers/thunderbolt/tunnel.c | 4
-rw-r--r--  drivers/thunderbolt/xdomain.c | 2
-rw-r--r--  drivers/uio/uio_dmem_genirq.c | 4
-rw-r--r--  drivers/uio/uio_pdrv_genirq.c | 14
-rw-r--r--  drivers/w1/masters/Kconfig | 9
-rw-r--r--  drivers/w1/masters/Makefile | 1
-rw-r--r--  drivers/w1/masters/mxc_w1.c | 4
-rw-r--r--  drivers/w1/masters/omap_hdq.c | 4
-rw-r--r--  drivers/w1/masters/sgi_w1.c | 130
-rw-r--r--  drivers/w1/slaves/Kconfig | 6
-rw-r--r--  drivers/w1/slaves/Makefile | 1
-rw-r--r--  drivers/w1/slaves/w1_ds250x.c | 290
148 files changed, 8804 insertions, 2216 deletions
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 2cb35d30cb14..3eacf474e1e3 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -39,6 +39,12 @@ static const guid_t prp_guids[] = {
/* External facing port GUID: efcc06cc-73ac-4bc3-bff0-76143807c389 */
GUID_INIT(0xefcc06cc, 0x73ac, 0x4bc3,
0xbf, 0xf0, 0x76, 0x14, 0x38, 0x07, 0xc3, 0x89),
+ /* Thunderbolt GUID for IMR_VALID: c44d002f-69f9-4e7d-a904-a7baabdf43f7 */
+ GUID_INIT(0xc44d002f, 0x69f9, 0x4e7d,
+ 0xa9, 0x04, 0xa7, 0xba, 0xab, 0xdf, 0x43, 0xf7),
+ /* Thunderbolt GUID for WAKE_SUPPORTED: 6c501103-c189-4296-ba72-9bf5a26ebe5d */
+ GUID_INIT(0x6c501103, 0xc189, 0x4296,
+ 0xba, 0x72, 0x9b, 0xf5, 0xa2, 0x6e, 0xbe, 0x5d),
};
/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index dc1c83eafc22..c0a491277aca 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -122,7 +122,7 @@ static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
-static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
@@ -196,30 +196,8 @@ static inline void binder_stats_created(enum binder_stat_types type)
atomic_inc(&binder_stats.obj_created[type]);
}
-struct binder_transaction_log_entry {
- int debug_id;
- int debug_id_done;
- int call_type;
- int from_proc;
- int from_thread;
- int target_handle;
- int to_proc;
- int to_thread;
- int to_node;
- int data_size;
- int offsets_size;
- int return_error_line;
- uint32_t return_error;
- uint32_t return_error_param;
- const char *context_name;
-};
-struct binder_transaction_log {
- atomic_t cur;
- bool full;
- struct binder_transaction_log_entry entry[32];
-};
-static struct binder_transaction_log binder_transaction_log;
-static struct binder_transaction_log binder_transaction_log_failed;
+struct binder_transaction_log binder_transaction_log;
+struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_transaction_log *log)
@@ -480,6 +458,7 @@ enum binder_deferred_state {
* @inner_lock: can nest under outer_lock and/or node lock
* @outer_lock: no nesting under innor or node lock
* Lock order: 1) outer, 2) node, 3) inner
+ * @binderfs_entry: process-specific binderfs log file
*
* Bookkeeping structure for binder processes
*/
@@ -509,6 +488,7 @@ struct binder_proc {
struct binder_context *context;
spinlock_t inner_lock;
spinlock_t outer_lock;
+ struct dentry *binderfs_entry;
};
enum {
@@ -5230,6 +5210,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
struct binder_device *binder_dev;
+ struct binderfs_info *info;
+ struct dentry *binder_binderfs_dir_entry_proc = NULL;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
current->group_leader->pid, current->pid);
@@ -5244,11 +5226,14 @@ static int binder_open(struct inode *nodp, struct file *filp)
INIT_LIST_HEAD(&proc->todo);
proc->default_priority = task_nice(current);
/* binderfs stashes devices in i_private */
- if (is_binderfs_device(nodp))
+ if (is_binderfs_device(nodp)) {
binder_dev = nodp->i_private;
- else
+ info = nodp->i_sb->s_fs_info;
+ binder_binderfs_dir_entry_proc = info->proc_log_dir;
+ } else {
binder_dev = container_of(filp->private_data,
struct binder_device, miscdev);
+ }
proc->context = &binder_dev->context;
binder_alloc_init(&proc->alloc);
@@ -5279,6 +5264,35 @@ static int binder_open(struct inode *nodp, struct file *filp)
&proc_fops);
}
+ if (binder_binderfs_dir_entry_proc) {
+ char strbuf[11];
+ struct dentry *binderfs_entry;
+
+ snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+ /*
+ * Similar to debugfs, the process specific log file is shared
+ * between contexts. If the file has already been created for a
+ * process, the following binderfs_create_file() call will
+ * fail with error code EEXIST if another context of the same
+ * process invoked binder_open(). This is ok since same as
+ * debugfs, the log file will contain information on all
+ * contexts of a given PID.
+ */
+ binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
+ strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
+ if (!IS_ERR(binderfs_entry)) {
+ proc->binderfs_entry = binderfs_entry;
+ } else {
+ int error;
+
+ error = PTR_ERR(binderfs_entry);
+ if (error != -EEXIST) {
+ pr_warn("Unable to create file %s in binderfs (error %d)\n",
+ strbuf, error);
+ }
+ }
+ }
+
return 0;
}
@@ -5318,6 +5332,12 @@ static int binder_release(struct inode *nodp, struct file *filp)
struct binder_proc *proc = filp->private_data;
debugfs_remove(proc->debugfs_entry);
+
+ if (proc->binderfs_entry) {
+ binderfs_remove_file(proc->binderfs_entry);
+ proc->binderfs_entry = NULL;
+ }
+
binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
return 0;
@@ -5907,7 +5927,7 @@ static void print_binder_proc_stats(struct seq_file *m,
}
-static int state_show(struct seq_file *m, void *unused)
+int binder_state_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
struct binder_node *node;
@@ -5946,7 +5966,7 @@ static int state_show(struct seq_file *m, void *unused)
return 0;
}
-static int stats_show(struct seq_file *m, void *unused)
+int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
@@ -5962,7 +5982,7 @@ static int stats_show(struct seq_file *m, void *unused)
return 0;
}
-static int transactions_show(struct seq_file *m, void *unused)
+int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
@@ -6018,7 +6038,7 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
"\n" : " (incomplete)\n");
}
-static int transaction_log_show(struct seq_file *m, void *unused)
+int binder_transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_transaction_log *log = m->private;
unsigned int log_cur = atomic_read(&log->cur);
@@ -6050,11 +6070,6 @@ const struct file_operations binder_fops = {
.release = binder_release,
};
-DEFINE_SHOW_ATTRIBUTE(state);
-DEFINE_SHOW_ATTRIBUTE(stats);
-DEFINE_SHOW_ATTRIBUTE(transactions);
-DEFINE_SHOW_ATTRIBUTE(transaction_log);
-
static int __init init_binder_device(const char *name)
{
int ret;
@@ -6108,30 +6123,31 @@ static int __init binder_init(void)
0444,
binder_debugfs_dir_entry_root,
NULL,
- &state_fops);
+ &binder_state_fops);
debugfs_create_file("stats",
0444,
binder_debugfs_dir_entry_root,
NULL,
- &stats_fops);
+ &binder_stats_fops);
debugfs_create_file("transactions",
0444,
binder_debugfs_dir_entry_root,
NULL,
- &transactions_fops);
+ &binder_transactions_fops);
debugfs_create_file("transaction_log",
0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log,
- &transaction_log_fops);
+ &binder_transaction_log_fops);
debugfs_create_file("failed_transaction_log",
0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log_failed,
- &transaction_log_fops);
+ &binder_transaction_log_fops);
}
- if (strcmp(binder_devices_param, "") != 0) {
+ if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
+ strcmp(binder_devices_param, "") != 0) {
/*
* Copy the module_parameter string, because we don't want to
* tokenize it in-place.
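
[Editor's note] The comment in binder_open() above describes creating a per-PID log file that is shared between binder contexts, treating EEXIST as success. A minimal userspace sketch of the same idiom follows (the directory and helper name are hypothetical, not part of the patch):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Create "<dir>/<pid>" once; a racing second creator sees EEXIST and
 * treats it as success, mirroring the binder_open() logic above. */
static int create_proc_log(const char *dir, pid_t pid)
{
        char path[64];
        int fd;

        snprintf(path, sizeof(path), "%s/%u", dir, (unsigned int)pid);
        fd = open(path, O_CREAT | O_EXCL | O_WRONLY, 0444);
        if (fd < 0) {
                if (errno == EEXIST)
                        return 0;       /* already created by another context */
                fprintf(stderr, "Unable to create %s (error %d)\n", path, errno);
                return -1;
        }
        close(fd);
        return 0;
}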
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 045b3e42d98b..bd47f7f72075 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -35,15 +35,63 @@ struct binder_device {
struct inode *binderfs_inode;
};
+/**
+ * binderfs_mount_opts - mount options for binderfs
+ * @max: maximum number of allocatable binderfs binder devices
+ * @stats_mode: enable binder stats in binderfs.
+ */
+struct binderfs_mount_opts {
+ int max;
+ int stats_mode;
+};
+
+/**
+ * binderfs_info - information about a binderfs mount
+ * @ipc_ns: The ipc namespace the binderfs mount belongs to.
+ * @control_dentry: This records the dentry of this binderfs mount
+ * binder-control device.
+ * @root_uid: uid that needs to be used when a new binder device is
+ * created.
+ * @root_gid: gid that needs to be used when a new binder device is
+ * created.
+ * @mount_opts: The mount options in use.
+ * @device_count: The current number of allocated binder devices.
+ * @proc_log_dir: Pointer to the directory dentry containing process-specific
+ * logs.
+ */
+struct binderfs_info {
+ struct ipc_namespace *ipc_ns;
+ struct dentry *control_dentry;
+ kuid_t root_uid;
+ kgid_t root_gid;
+ struct binderfs_mount_opts mount_opts;
+ int device_count;
+ struct dentry *proc_log_dir;
+};
+
extern const struct file_operations binder_fops;
+extern char *binder_devices_param;
+
#ifdef CONFIG_ANDROID_BINDERFS
extern bool is_binderfs_device(const struct inode *inode);
+extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
+ const struct file_operations *fops,
+ void *data);
+extern void binderfs_remove_file(struct dentry *dentry);
#else
static inline bool is_binderfs_device(const struct inode *inode)
{
return false;
}
+static inline struct dentry *binderfs_create_file(struct dentry *dir,
+ const char *name,
+ const struct file_operations *fops,
+ void *data)
+{
+ return NULL;
+}
+static inline void binderfs_remove_file(struct dentry *dentry) {}
#endif
#ifdef CONFIG_ANDROID_BINDERFS
@@ -55,4 +103,42 @@ static inline int __init init_binderfs(void)
}
#endif
+int binder_stats_show(struct seq_file *m, void *unused);
+DEFINE_SHOW_ATTRIBUTE(binder_stats);
+
+int binder_state_show(struct seq_file *m, void *unused);
+DEFINE_SHOW_ATTRIBUTE(binder_state);
+
+int binder_transactions_show(struct seq_file *m, void *unused);
+DEFINE_SHOW_ATTRIBUTE(binder_transactions);
+
+int binder_transaction_log_show(struct seq_file *m, void *unused);
+DEFINE_SHOW_ATTRIBUTE(binder_transaction_log);
+
+struct binder_transaction_log_entry {
+ int debug_id;
+ int debug_id_done;
+ int call_type;
+ int from_proc;
+ int from_thread;
+ int target_handle;
+ int to_proc;
+ int to_thread;
+ int to_node;
+ int data_size;
+ int offsets_size;
+ int return_error_line;
+ uint32_t return_error;
+ uint32_t return_error_param;
+ const char *context_name;
+};
+
+struct binder_transaction_log {
+ atomic_t cur;
+ bool full;
+ struct binder_transaction_log_entry entry[32];
+};
+
+extern struct binder_transaction_log binder_transaction_log;
+extern struct binder_transaction_log binder_transaction_log_failed;
#endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index e773f45d19d9..e2580e5316a2 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -48,45 +48,23 @@ static dev_t binderfs_dev;
static DEFINE_MUTEX(binderfs_minors_mutex);
static DEFINE_IDA(binderfs_minors);
-/**
- * binderfs_mount_opts - mount options for binderfs
- * @max: maximum number of allocatable binderfs binder devices
- */
-struct binderfs_mount_opts {
- int max;
-};
-
enum {
Opt_max,
+ Opt_stats_mode,
Opt_err
};
+enum binderfs_stats_mode {
+ STATS_NONE,
+ STATS_GLOBAL,
+};
+
static const match_table_t tokens = {
{ Opt_max, "max=%d" },
+ { Opt_stats_mode, "stats=%s" },
{ Opt_err, NULL }
};
-/**
- * binderfs_info - information about a binderfs mount
- * @ipc_ns: The ipc namespace the binderfs mount belongs to.
- * @control_dentry: This records the dentry of this binderfs mount
- * binder-control device.
- * @root_uid: uid that needs to be used when a new binder device is
- * created.
- * @root_gid: gid that needs to be used when a new binder device is
- * created.
- * @mount_opts: The mount options in use.
- * @device_count: The current number of allocated binder devices.
- */
-struct binderfs_info {
- struct ipc_namespace *ipc_ns;
- struct dentry *control_dentry;
- kuid_t root_uid;
- kgid_t root_gid;
- struct binderfs_mount_opts mount_opts;
- int device_count;
-};
-
static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
{
return inode->i_sb->s_fs_info;
@@ -186,8 +164,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
req->major = MAJOR(binderfs_dev);
req->minor = minor;
- ret = copy_to_user(userp, req, sizeof(*req));
- if (ret) {
+ if (userp && copy_to_user(userp, req, sizeof(*req))) {
ret = -EFAULT;
goto err;
}
@@ -272,7 +249,7 @@ static void binderfs_evict_inode(struct inode *inode)
clear_inode(inode);
- if (!device)
+ if (!S_ISCHR(inode->i_mode) || !device)
return;
mutex_lock(&binderfs_minors_mutex);
@@ -291,8 +268,9 @@ static void binderfs_evict_inode(struct inode *inode)
static int binderfs_parse_mount_opts(char *data,
struct binderfs_mount_opts *opts)
{
- char *p;
+ char *p, *stats;
opts->max = BINDERFS_MAX_MINOR;
+ opts->stats_mode = STATS_NONE;
while ((p = strsep(&data, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
@@ -312,6 +290,22 @@ static int binderfs_parse_mount_opts(char *data,
opts->max = max_devices;
break;
+ case Opt_stats_mode:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EINVAL;
+
+ stats = match_strdup(&args[0]);
+ if (!stats)
+ return -ENOMEM;
+
+ if (strcmp(stats, "global") != 0) {
+ kfree(stats);
+ return -EINVAL;
+ }
+
+ opts->stats_mode = STATS_GLOBAL;
+ kfree(stats);
+ break;
default:
pr_err("Invalid mount options\n");
return -EINVAL;
@@ -323,8 +317,21 @@ static int binderfs_parse_mount_opts(char *data,
static int binderfs_remount(struct super_block *sb, int *flags, char *data)
{
+ int prev_stats_mode, ret;
struct binderfs_info *info = sb->s_fs_info;
- return binderfs_parse_mount_opts(data, &info->mount_opts);
+
+ prev_stats_mode = info->mount_opts.stats_mode;
+ ret = binderfs_parse_mount_opts(data, &info->mount_opts);
+ if (ret)
+ return ret;
+
+ if (prev_stats_mode != info->mount_opts.stats_mode) {
+ pr_err("Binderfs stats mode cannot be changed during a remount\n");
+ info->mount_opts.stats_mode = prev_stats_mode;
+ return -EINVAL;
+ }
+
+ return 0;
}
static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
@@ -334,6 +341,8 @@ static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
info = root->d_sb->s_fs_info;
if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
seq_printf(seq, ",max=%d", info->mount_opts.max);
+ if (info->mount_opts.stats_mode == STATS_GLOBAL)
+ seq_printf(seq, ",stats=global");
return 0;
}
@@ -462,11 +471,192 @@ static const struct inode_operations binderfs_dir_inode_operations = {
.unlink = binderfs_unlink,
};
+static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret;
+
+ ret = new_inode(sb);
+ if (ret) {
+ ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET);
+ ret->i_mode = mode;
+ ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
+ }
+ return ret;
+}
+
+static struct dentry *binderfs_create_dentry(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+
+ dentry = lookup_one_len(name, parent, strlen(name));
+ if (IS_ERR(dentry))
+ return dentry;
+
+ /* Return error if the file/dir already exists. */
+ if (d_really_is_positive(dentry)) {
+ dput(dentry);
+ return ERR_PTR(-EEXIST);
+ }
+
+ return dentry;
+}
+
+void binderfs_remove_file(struct dentry *dentry)
+{
+ struct inode *parent_inode;
+
+ parent_inode = d_inode(dentry->d_parent);
+ inode_lock(parent_inode);
+ if (simple_positive(dentry)) {
+ dget(dentry);
+ simple_unlink(parent_inode, dentry);
+ d_delete(dentry);
+ dput(dentry);
+ }
+ inode_unlock(parent_inode);
+}
+
+struct dentry *binderfs_create_file(struct dentry *parent, const char *name,
+ const struct file_operations *fops,
+ void *data)
+{
+ struct dentry *dentry;
+ struct inode *new_inode, *parent_inode;
+ struct super_block *sb;
+
+ parent_inode = d_inode(parent);
+ inode_lock(parent_inode);
+
+ dentry = binderfs_create_dentry(parent, name);
+ if (IS_ERR(dentry))
+ goto out;
+
+ sb = parent_inode->i_sb;
+ new_inode = binderfs_make_inode(sb, S_IFREG | 0444);
+ if (!new_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ new_inode->i_fop = fops;
+ new_inode->i_private = data;
+ d_instantiate(dentry, new_inode);
+ fsnotify_create(parent_inode, dentry);
+
+out:
+ inode_unlock(parent_inode);
+ return dentry;
+}
+
+static struct dentry *binderfs_create_dir(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+ struct inode *new_inode, *parent_inode;
+ struct super_block *sb;
+
+ parent_inode = d_inode(parent);
+ inode_lock(parent_inode);
+
+ dentry = binderfs_create_dentry(parent, name);
+ if (IS_ERR(dentry))
+ goto out;
+
+ sb = parent_inode->i_sb;
+ new_inode = binderfs_make_inode(sb, S_IFDIR | 0755);
+ if (!new_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ new_inode->i_fop = &simple_dir_operations;
+ new_inode->i_op = &simple_dir_inode_operations;
+
+ set_nlink(new_inode, 2);
+ d_instantiate(dentry, new_inode);
+ inc_nlink(parent_inode);
+ fsnotify_mkdir(parent_inode, dentry);
+
+out:
+ inode_unlock(parent_inode);
+ return dentry;
+}
+
+static int init_binder_logs(struct super_block *sb)
+{
+ struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir;
+ struct binderfs_info *info;
+ int ret = 0;
+
+ binder_logs_root_dir = binderfs_create_dir(sb->s_root,
+ "binder_logs");
+ if (IS_ERR(binder_logs_root_dir)) {
+ ret = PTR_ERR(binder_logs_root_dir);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir, "stats",
+ &binder_stats_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir, "state",
+ &binder_state_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir, "transactions",
+ &binder_transactions_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir,
+ "transaction_log",
+ &binder_transaction_log_fops,
+ &binder_transaction_log);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir,
+ "failed_transaction_log",
+ &binder_transaction_log_fops,
+ &binder_transaction_log_failed);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc");
+ if (IS_ERR(proc_log_dir)) {
+ ret = PTR_ERR(proc_log_dir);
+ goto out;
+ }
+ info = sb->s_fs_info;
+ info->proc_log_dir = proc_log_dir;
+
+out:
+ return ret;
+}
+
static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
{
int ret;
struct binderfs_info *info;
struct inode *inode = NULL;
+ struct binderfs_device device_info = { 0 };
+ const char *name;
+ size_t len;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
@@ -521,7 +711,25 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
if (!sb->s_root)
return -ENOMEM;
- return binderfs_binder_ctl_create(sb);
+ ret = binderfs_binder_ctl_create(sb);
+ if (ret)
+ return ret;
+
+ name = binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ strscpy(device_info.name, name, len + 1);
+ ret = binderfs_binder_device_create(inode, NULL, &device_info);
+ if (ret)
+ return ret;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ if (info->mount_opts.stats_mode == STATS_GLOBAL)
+ return init_binder_logs(sb);
+
+ return 0;
}
static struct dentry *binderfs_mount(struct file_system_type *fs_type,
@@ -553,6 +761,18 @@ static struct file_system_type binder_fs_type = {
int __init init_binderfs(void)
{
int ret;
+ const char *name;
+ size_t len;
+
+ /* Verify that the default binderfs device names are valid. */
+ name = binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ if (len > BINDERFS_MAX_NAME)
+ return -E2BIG;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
/* Allocate new major number for binderfs. */
ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR,
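
[Editor's note] The loops added to binderfs_fill_super() and init_binderfs() walk the comma-separated binder_devices_param list with strcspn() instead of tokenizing it in place. A runnable userspace sketch of the same walk (the name list here is illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *name = "binder,hwbinder,vndbinder";
        size_t len;

        /* strcspn() returns the length of the next segment; unlike a
         * strsep()-style tokenizer, the source string is never modified. */
        for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
                printf("%.*s\n", (int)len, name);
                name += len;
                if (*name == ',')
                        name++;
        }
        return 0;
}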
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index b08dc50f9f26..9eb564c002f6 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -97,6 +97,13 @@ void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
}
#endif
+static inline bool should_stop_iteration(void)
+{
+ if (need_resched())
+ cond_resched();
+ return fatal_signal_pending(current);
+}
+
/*
* This funcion reads the *physical* memory. The f_pos points directly to the
* memory location.
@@ -175,6 +182,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
p += sz;
count -= sz;
read += sz;
+ if (should_stop_iteration())
+ break;
}
kfree(bounce);
@@ -251,6 +260,8 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
p += sz;
count -= sz;
written += sz;
+ if (should_stop_iteration())
+ break;
}
*ppos += written;
@@ -468,6 +479,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
read += sz;
low_count -= sz;
count -= sz;
+ if (should_stop_iteration()) {
+ count = 0;
+ break;
+ }
}
}
@@ -492,6 +507,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
buf += sz;
read += sz;
p += sz;
+ if (should_stop_iteration())
+ break;
}
free_page((unsigned long)kbuf);
}
@@ -544,6 +561,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
p += sz;
count -= sz;
written += sz;
+ if (should_stop_iteration())
+ break;
}
*ppos += written;
@@ -595,6 +614,8 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
buf += sz;
virtr += sz;
p += sz;
+ if (should_stop_iteration())
+ break;
}
free_page((unsigned long)kbuf);
}
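
[Editor's note] should_stop_iteration() lets the long /dev/mem and /dev/kmem copy loops yield the CPU and abort early on a fatal signal, returning a partial count. A rough userspace analogue of that loop shape (the signal handling and buffer sizes are assumptions; the kernel side uses need_resched()/cond_resched() and fatal_signal_pending() instead):

#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t got_fatal_signal;

static void on_signal(int sig)
{
        (void)sig;
        got_fatal_signal = 1;
}

/* Stand-in for the kernel helper: yield, then report a pending signal. */
static int should_stop_iteration(void)
{
        sched_yield();
        return got_fatal_signal;
}

int main(void)
{
        static char src[1 << 20], dst[1 << 20];
        size_t copied = 0;
        const size_t chunk = 4096;

        signal(SIGINT, on_signal);
        while (copied < sizeof(src)) {
                memcpy(dst + copied, src + copied, chunk);
                copied += chunk;
                if (should_stop_iteration())
                        break;  /* like mem.c, return the partial count */
        }
        printf("copied %zu bytes\n", copied);
        return 0;
}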
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index f0a8adca1eee..c86f18aa8985 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -737,7 +737,7 @@ static int pp_release(struct inode *inode, struct file *file)
"negotiated back to compatibility mode because user-space forgot\n");
}
- if (pp->flags & PP_CLAIMED) {
+ if ((pp->flags & PP_CLAIMED) && pp->pdev) {
struct ieee1284_info *info;
info = &pp->pdev->port->ieee1284;
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index 0bdc602f0d48..98f3150e0048 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -373,7 +373,7 @@ static int tosh_get_machine_id(void __iomem *bios)
value. This has been verified on a Satellite Pro 430CDT,
Tecra 750CDT, Tecra 780DVD and Satellite 310CDT. */
#if TOSH_DEBUG
- printk("toshiba: debugging ID ebx=0x%04x\n", regs.ebx);
+ pr_debug("toshiba: debugging ID ebx=0x%04x\n", regs.ebx);
#endif
bx = 0xe6f5;
@@ -417,7 +417,7 @@ static int tosh_probe(void)
for (i=0;i<7;i++) {
if (readb(bios+0xe010+i)!=signature[i]) {
- printk("toshiba: not a supported Toshiba laptop\n");
+ pr_err("toshiba: not a supported Toshiba laptop\n");
iounmap(bios);
return -ENODEV;
}
@@ -433,7 +433,7 @@ static int tosh_probe(void)
/* if this is not a Toshiba laptop carry flag is set and ah=0x86 */
if ((flag==1) || ((regs.eax & 0xff00)==0x8600)) {
- printk("toshiba: not a supported Toshiba laptop\n");
+ pr_err("toshiba: not a supported Toshiba laptop\n");
iounmap(bios);
return -ENODEV;
}
@@ -486,7 +486,7 @@ static int __init toshiba_init(void)
if (tosh_probe())
return -ENODEV;
- printk(KERN_INFO "Toshiba System Management Mode driver v" TOSH_VERSION "\n");
+ pr_info("Toshiba System Management Mode driver v" TOSH_VERSION "\n");
/* set the port to use for Fn status if not specified as a parameter */
if (tosh_fn==0x00)
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index c3fd632af119..a32bfaeb7e61 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -12,23 +12,13 @@
#include <linux/platform_device.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/rpmh.h>
+#include <soc/qcom/tcs.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#define CLK_RPMH_ARC_EN_OFFSET 0
#define CLK_RPMH_VRM_EN_OFFSET 4
-#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
-#define BCM_TCS_CMD_VALID_SHIFT 29
-#define BCM_TCS_CMD_VOTE_MASK 0x3fff
-#define BCM_TCS_CMD_VOTE_SHIFT 0
-
-#define BCM_TCS_CMD(valid, vote) \
- (BCM_TCS_CMD_COMMIT_MASK | \
- ((valid) << BCM_TCS_CMD_VALID_SHIFT) | \
- ((vote & BCM_TCS_CMD_VOTE_MASK) \
- << BCM_TCS_CMD_VOTE_SHIFT))
-
/**
* struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager(BCM)
* @unit: divisor used to convert Hz value to an RPMh msg
@@ -269,7 +259,7 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
}
cmd.addr = c->res_addr;
- cmd.data = BCM_TCS_CMD(enable, cmd_state);
+ cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
if (ret) {
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index ee9b5f70bfa4..ad02dc6747a4 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -140,10 +140,8 @@ static int adc_jack_probe(struct platform_device *pdev)
return err;
data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq failed\n");
+ if (data->irq < 0)
return -ENODEV;
- }
err = request_any_context_irq(data->irq, adc_jack_irq_thread,
pdata->irq_flags, pdata->name, data);
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 7e9f4c9ee87d..e970134c95fa 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1253,7 +1253,7 @@ static int arizona_extcon_get_micd_configs(struct device *dev,
int i, j;
u32 *vals;
- nconfs = device_property_read_u32_array(arizona->dev, prop, NULL, 0);
+ nconfs = device_property_count_u32(arizona->dev, prop);
if (nconfs <= 0)
return 0;
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 7254852e6ec0..415afaf479e7 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -121,7 +121,6 @@ static const char * const axp288_pwr_up_down_info[] = {
"Last shutdown caused by PMIC UVLO threshold",
"Last shutdown caused by SOC initiated cold off",
"Last shutdown caused by user pressing the power button",
- NULL,
};
/*
@@ -130,18 +129,21 @@ static const char * const axp288_pwr_up_down_info[] = {
*/
static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
{
- const char * const *rsi;
unsigned int val, i, clear_mask = 0;
+ unsigned long bits;
int ret;
ret = regmap_read(info->regmap, AXP288_PS_BOOT_REASON_REG, &val);
- for (i = 0, rsi = axp288_pwr_up_down_info; *rsi; rsi++, i++) {
- if (val & BIT(i)) {
- dev_dbg(info->dev, "%s\n", *rsi);
- clear_mask |= BIT(i);
- }
+ if (ret < 0) {
+ dev_err(info->dev, "failed to read reset source indicator\n");
+ return;
}
+ bits = val & GENMASK(ARRAY_SIZE(axp288_pwr_up_down_info) - 1, 0);
+ for_each_set_bit(i, &bits, ARRAY_SIZE(axp288_pwr_up_down_info))
+ dev_dbg(info->dev, "%s\n", axp288_pwr_up_down_info[i]);
+ clear_mask = bits;
+
/* Clear the register value for next reboot (write 1 to clear bit) */
regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask);
}
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index 350fb34abfa0..8405512f5199 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -363,6 +363,7 @@ MODULE_DEVICE_TABLE(i2c, fsa9480_id);
static const struct of_device_id fsa9480_of_match[] = {
{ .compatible = "fcs,fsa9480", },
+ { .compatible = "fcs,fsa880", },
{ },
};
MODULE_DEVICE_TABLE(of, fsa9480_of_match);
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index faddeac948db..c211222f5d0c 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -22,26 +22,22 @@
/**
* struct gpio_extcon_data - A simple GPIO-controlled extcon device state container.
* @edev: Extcon device.
- * @irq: Interrupt line for the external connector.
* @work: Work fired by the interrupt.
* @debounce_jiffies: Number of jiffies to wait for the GPIO to stabilize, from the debounce
* value.
* @gpiod: GPIO descriptor for this external connector.
* @extcon_id: The unique id of specific external connector.
* @debounce: Debounce time for GPIO IRQ in ms.
- * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
* @check_on_resume: Boolean describing whether to check the state of gpio
* while resuming from sleep.
*/
struct gpio_extcon_data {
struct extcon_dev *edev;
- int irq;
struct delayed_work work;
unsigned long debounce_jiffies;
struct gpio_desc *gpiod;
unsigned int extcon_id;
unsigned long debounce;
- unsigned long irq_flags;
bool check_on_resume;
};
@@ -69,6 +65,8 @@ static int gpio_extcon_probe(struct platform_device *pdev)
{
struct gpio_extcon_data *data;
struct device *dev = &pdev->dev;
+ unsigned long irq_flags;
+ int irq;
int ret;
data = devm_kzalloc(dev, sizeof(struct gpio_extcon_data), GFP_KERNEL);
@@ -82,15 +80,26 @@ static int gpio_extcon_probe(struct platform_device *pdev)
* developed to get the extcon id from device-tree or others.
* On later, it have to be solved.
*/
- if (!data->irq_flags || data->extcon_id > EXTCON_NONE)
+ if (data->extcon_id > EXTCON_NONE)
return -EINVAL;
data->gpiod = devm_gpiod_get(dev, "extcon", GPIOD_IN);
if (IS_ERR(data->gpiod))
return PTR_ERR(data->gpiod);
- data->irq = gpiod_to_irq(data->gpiod);
- if (data->irq <= 0)
- return data->irq;
+ irq = gpiod_to_irq(data->gpiod);
+ if (irq <= 0)
+ return irq;
+
+ /*
+ * It is unlikely that this is an acknowledged interrupt that goes
+ * away after handling, what we are looking for are falling edges
+ * if the signal is active low, and rising edges if the signal is
+ * active high.
+ */
+ if (gpiod_is_active_low(data->gpiod))
+ irq_flags = IRQF_TRIGGER_FALLING;
+ else
+ irq_flags = IRQF_TRIGGER_RISING;
/* Allocate the memory of extcon devie and register extcon device */
data->edev = devm_extcon_dev_allocate(dev, &data->extcon_id);
@@ -109,8 +118,8 @@ static int gpio_extcon_probe(struct platform_device *pdev)
* Request the interrupt of gpio to detect whether external connector
* is attached or detached.
*/
- ret = devm_request_any_context_irq(dev, data->irq,
- gpio_irq_handler, data->irq_flags,
+ ret = devm_request_any_context_irq(dev, irq,
+ gpio_irq_handler, irq_flags,
pdev->name, data);
if (ret < 0)
return ret;
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index a343a6ef3506..e6b50ca83008 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -774,12 +774,12 @@ static int max77843_init_muic_regmap(struct max77693_dev *max77843)
{
int ret;
- max77843->i2c_muic = i2c_new_dummy(max77843->i2c->adapter,
+ max77843->i2c_muic = i2c_new_dummy_device(max77843->i2c->adapter,
I2C_ADDR_MUIC);
- if (!max77843->i2c_muic) {
+ if (IS_ERR(max77843->i2c_muic)) {
dev_err(&max77843->i2c->dev,
"Cannot allocate I2C device for MUIC\n");
- return -ENOMEM;
+ return PTR_ERR(max77843->i2c_muic);
}
i2c_set_clientdata(max77843->i2c_muic, max77843);
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 98e4f616b8f1..dc43847ad2b0 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -597,7 +597,7 @@ static int sm5022_muic_i2c_probe(struct i2c_client *i2c,
ret = devm_request_threaded_irq(info->dev, virq, NULL,
sm5502_muic_irq_handler,
- IRQF_NO_SUSPEND,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
muic_irq->name, info);
if (ret) {
dev_err(info->dev,
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index c9a827bffe1c..e40a77bfe821 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -216,6 +216,24 @@ config INTEL_STRATIX10_SERVICE
Say Y here if you want Stratix10 service layer support.
+config INTEL_STRATIX10_RSU
+ tristate "Intel Stratix10 Remote System Update"
+ depends on INTEL_STRATIX10_SERVICE
+ help
+ The Intel Remote System Update (RSU) driver exposes interfaces
+ access through the Intel Service Layer to user space via sysfs
+ device attribute nodes. The RSU interfaces report/control some of
+ the optional RSU features of the Stratix 10 SoC FPGA.
+
+ The RSU provides a way for customers to update the boot
+ configuration of a Stratix 10 SoC device with significantly reduced
+ risk of corrupting the bitstream storage and bricking the system.
+
+ Enable RSU support if you are using an Intel SoC FPGA with the RSU
+ feature enabled and you want Linux user space control.
+
+ Say Y here if you want Intel RSU support.
+
config QCOM_SCM
bool
depends on ARM || ARM64
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 2b6e3a0be595..3fcb91975bdc 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_EDD) += edd.o
obj-$(CONFIG_EFI_PCDP) += pcdp.o
obj-$(CONFIG_DMIID) += dmi-id.o
obj-$(CONFIG_INTEL_STRATIX10_SERVICE) += stratix10-svc.o
+obj-$(CONFIG_INTEL_STRATIX10_RSU) += stratix10-rsu.o
obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 0739f3b70347..db0812263d46 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -92,8 +92,8 @@ static int vpd_section_check_key_name(const u8 *key, s32 key_len)
return VPD_OK;
}
-static int vpd_section_attrib_add(const u8 *key, s32 key_len,
- const u8 *value, s32 value_len,
+static int vpd_section_attrib_add(const u8 *key, u32 key_len,
+ const u8 *value, u32 value_len,
void *arg)
{
int ret;
diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
index 92e3258552fc..dda525c0f968 100644
--- a/drivers/firmware/google/vpd_decode.c
+++ b/drivers/firmware/google/vpd_decode.c
@@ -9,8 +9,8 @@
#include "vpd_decode.h"
-static int vpd_decode_len(const s32 max_len, const u8 *in,
- s32 *length, s32 *decoded_len)
+static int vpd_decode_len(const u32 max_len, const u8 *in,
+ u32 *length, u32 *decoded_len)
{
u8 more;
int i = 0;
@@ -30,18 +30,39 @@ static int vpd_decode_len(const s32 max_len, const u8 *in,
} while (more);
*decoded_len = i;
+ return VPD_OK;
+}
+
+static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
+ u32 *_consumed, const u8 **entry, u32 *entry_len)
+{
+ u32 decoded_len;
+ u32 consumed = *_consumed;
+
+ if (vpd_decode_len(max_len - consumed, &input_buf[consumed],
+ entry_len, &decoded_len) != VPD_OK)
+ return VPD_FAIL;
+ if (max_len - consumed < decoded_len)
+ return VPD_FAIL;
+
+ consumed += decoded_len;
+ *entry = input_buf + consumed;
+
+ /* entry_len is untrusted data and must be checked again. */
+ if (max_len - consumed < *entry_len)
+ return VPD_FAIL;
+ consumed += decoded_len;
+ *_consumed = consumed;
return VPD_OK;
}
-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
vpd_decode_callback callback, void *callback_arg)
{
int type;
- int res;
- s32 key_len;
- s32 value_len;
- s32 decoded_len;
+ u32 key_len;
+ u32 value_len;
const u8 *key;
const u8 *value;
@@ -56,26 +77,14 @@ int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
case VPD_TYPE_STRING:
(*consumed)++;
- /* key */
- res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
- &key_len, &decoded_len);
- if (res != VPD_OK || *consumed + decoded_len >= max_len)
+ if (vpd_decode_entry(max_len, input_buf, consumed, &key,
+ &key_len) != VPD_OK)
return VPD_FAIL;
- *consumed += decoded_len;
- key = &input_buf[*consumed];
- *consumed += key_len;
-
- /* value */
- res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
- &value_len, &decoded_len);
- if (res != VPD_OK || *consumed + decoded_len > max_len)
+ if (vpd_decode_entry(max_len, input_buf, consumed, &value,
+ &value_len) != VPD_OK)
return VPD_FAIL;
- *consumed += decoded_len;
- value = &input_buf[*consumed];
- *consumed += value_len;
-
if (type == VPD_TYPE_STRING)
return callback(key, key_len, value, value_len,
callback_arg);
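
[Editor's note] The rework above replaces signed lengths with u32 and funnels both key and value through vpd_decode_entry(), which re-checks the untrusted entry_len after consuming the length prefix. The essential overflow-safe comparison, as a standalone sketch (the function and values are hypothetical):

#include <stdio.h>

/* Compare remaining space against the untrusted length. Because the
 * types are unsigned and *consumed <= max_len always holds,
 * max_len - *consumed cannot wrap, whereas *consumed + entry_len could. */
static int consume_entry(unsigned int max_len, unsigned int *consumed,
                         unsigned int entry_len)
{
        if (max_len - *consumed < entry_len)
                return -1;
        *consumed += entry_len;
        return 0;
}

int main(void)
{
        unsigned int consumed = 10;

        printf("%d\n", consume_entry(16, &consumed, 6));           /* 0: fits */
        printf("%d\n", consume_entry(16, &consumed, 0xffffffffu)); /* -1: rejected */
        return 0;
}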
diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h
index cf8c2ace155a..8dbe41cac599 100644
--- a/drivers/firmware/google/vpd_decode.h
+++ b/drivers/firmware/google/vpd_decode.h
@@ -25,8 +25,8 @@ enum {
};
/* Callback for vpd_decode_string to invoke. */
-typedef int vpd_decode_callback(const u8 *key, s32 key_len,
- const u8 *value, s32 value_len,
+typedef int vpd_decode_callback(const u8 *key, u32 key_len,
+ const u8 *value, u32 value_len,
void *arg);
/*
@@ -44,7 +44,7 @@ typedef int vpd_decode_callback(const u8 *key, s32 key_len,
* If one entry is successfully decoded, sends it to callback and returns the
* result.
*/
-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
vpd_decode_callback callback, void *callback_arg);
#endif /* __VPD_DECODE_H */
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
new file mode 100644
index 000000000000..bb008c019920
--- /dev/null
+++ b/drivers/firmware/stratix10-rsu.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019, Intel Corporation
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/intel/stratix10-svc-client.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+
+#define RSU_STATE_MASK GENMASK_ULL(31, 0)
+#define RSU_VERSION_MASK GENMASK_ULL(63, 32)
+#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0)
+#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32)
+#define RSU_FW_VERSION_MASK GENMASK_ULL(15, 0)
+
+#define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
+
+#define INVALID_RETRY_COUNTER 0xFFFFFFFF
+
+typedef void (*rsu_callback)(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data);
+/**
+ * struct stratix10_rsu_priv - rsu data structure
+ * @chan: pointer to the allocated service channel
+ * @client: active service client
+ * @completion: state for callback completion
+ * @lock: a mutex to protect callback completion state
+ * @status.current_image: address of image currently running in flash
+ * @status.fail_image: address of failed image in flash
+ * @status.version: the version number of RSU firmware
+ * @status.state: the state of RSU system
+ * @status.error_details: error code
+ * @status.error_location: the error offset inside the image that failed
+ * @retry_counter: the current image's retry counter
+ */
+struct stratix10_rsu_priv {
+ struct stratix10_svc_chan *chan;
+ struct stratix10_svc_client client;
+ struct completion completion;
+ struct mutex lock;
+ struct {
+ unsigned long current_image;
+ unsigned long fail_image;
+ unsigned int version;
+ unsigned int state;
+ unsigned int error_details;
+ unsigned int error_location;
+ } status;
+ unsigned int retry_counter;
+};
+
+/**
+ * rsu_status_callback() - Status callback from Intel Service Layer
+ * @client: pointer to service client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for RSU status request. Status is
+ * only updated after a system reboot, so a get updated status call is
+ * made during driver probe.
+ */
+static void rsu_status_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_RSU_OK)) {
+ priv->status.version = FIELD_GET(RSU_VERSION_MASK,
+ res->a2);
+ priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2);
+ priv->status.fail_image = res->a1;
+ priv->status.current_image = res->a0;
+ priv->status.error_location =
+ FIELD_GET(RSU_ERROR_LOCATION_MASK, res->a3);
+ priv->status.error_details =
+ FIELD_GET(RSU_ERROR_DETAIL_MASK, res->a3);
+ } else {
+ dev_err(client->dev, "COMMAND_RSU_STATUS returned 0x%lX\n",
+ res->a0);
+ priv->status.version = 0;
+ priv->status.state = 0;
+ priv->status.fail_image = 0;
+ priv->status.current_image = 0;
+ priv->status.error_location = 0;
+ priv->status.error_details = 0;
+ }
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_command_callback() - Update callback from Intel Service Layer
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for RSU commands.
+ */
+static void rsu_command_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+
+ if (data->status != BIT(SVC_STATUS_RSU_OK))
+ dev_err(client->dev, "RSU returned status is %i\n",
+ data->status);
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_retry_callback() - Callback from Intel service layer for getting
+ * the current image's retry counter from firmware
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for retry counter, which is used by
+ * user to know how many times the images is still allowed to reload
+ * itself before giving up and starting RSU fail-over flow.
+ */
+static void rsu_retry_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned int *counter = (unsigned int *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_RSU_OK))
+ priv->retry_counter = *counter;
+ else
+ dev_err(client->dev, "Failed to get retry counter %i\n",
+ data->status);
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_send_msg() - send a message to Intel service layer
+ * @priv: pointer to rsu private data
+ * @command: RSU status or update command
+ * @arg: the request argument, the bitstream address or notify status
+ * @callback: function pointer for the callback (status or update)
+ *
+ * Start an Intel service layer transaction to perform the SMC call that
+ * is necessary to get RSU boot log or set the address of bitstream to
+ * boot after reboot.
+ *
+ * Returns 0 on success or -ETIMEDOUT on error.
+ */
+static int rsu_send_msg(struct stratix10_rsu_priv *priv,
+ enum stratix10_svc_command_code command,
+ unsigned long arg,
+ rsu_callback callback)
+{
+ struct stratix10_svc_client_msg msg;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ reinit_completion(&priv->completion);
+ priv->client.receive_cb = callback;
+
+ msg.command = command;
+ if (arg)
+ msg.arg[0] = arg;
+
+ ret = stratix10_svc_send(priv->chan, &msg);
+ if (ret < 0)
+ goto status_done;
+
+ ret = wait_for_completion_interruptible_timeout(&priv->completion,
+ RSU_TIMEOUT);
+ if (!ret) {
+ dev_err(priv->client.dev,
+ "timeout waiting for SMC call\n");
+ ret = -ETIMEDOUT;
+ goto status_done;
+ } else if (ret < 0) {
+ dev_err(priv->client.dev,
+ "error %d waiting for SMC call\n", ret);
+ goto status_done;
+ } else {
+ ret = 0;
+ }
+
+status_done:
+ stratix10_svc_done(priv->chan);
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+/*
+ * This driver exposes some optional features of the Intel Stratix 10 SoC FPGA.
+ * The sysfs interfaces exposed here are FPGA Remote System Update (RSU)
+ * related. They allow user space software to query the configuration system
+ * status and to request optional reboot behavior specific to Intel FPGAs.
+ */
+
+static ssize_t current_image_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08lx\n", priv->status.current_image);
+}
+
+static ssize_t fail_image_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08lx\n", priv->status.fail_image);
+}
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.version);
+}
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.state);
+}
+
+static ssize_t error_location_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.error_location);
+}
+
+static ssize_t error_details_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.error_details);
+}
+
+static ssize_t retry_counter_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->retry_counter);
+}
+
+static ssize_t reboot_image_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+ unsigned long address;
+ int ret;
+
+ if (priv == 0)
+ return -ENODEV;
+
+ ret = kstrtoul(buf, 0, &address);
+ if (ret)
+ return ret;
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_UPDATE,
+ address, rsu_command_callback);
+ if (ret) {
+ dev_err(dev, "Error, RSU update returned %i\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+
+static ssize_t notify_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+ unsigned long status;
+ int ret;
+
+ if (priv == 0)
+ return -ENODEV;
+
+ ret = kstrtoul(buf, 0, &status);
+ if (ret)
+ return ret;
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_NOTIFY,
+ status, rsu_command_callback);
+ if (ret) {
+ dev_err(dev, "Error, RSU notify returned %i\n", ret);
+ return ret;
+ }
+
+ /* to get the updated state */
+ ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
+ 0, rsu_status_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU status %i\n", ret);
+ return ret;
+ }
+
+ /* only 19.3 or late version FW supports retry counter feature */
+ if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY,
+ 0, rsu_retry_callback);
+ if (ret) {
+ dev_err(dev,
+ "Error, getting RSU retry %i\n", ret);
+ return ret;
+ }
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(current_image);
+static DEVICE_ATTR_RO(fail_image);
+static DEVICE_ATTR_RO(state);
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_RO(error_location);
+static DEVICE_ATTR_RO(error_details);
+static DEVICE_ATTR_RO(retry_counter);
+static DEVICE_ATTR_WO(reboot_image);
+static DEVICE_ATTR_WO(notify);
+
+static struct attribute *rsu_attrs[] = {
+ &dev_attr_current_image.attr,
+ &dev_attr_fail_image.attr,
+ &dev_attr_state.attr,
+ &dev_attr_version.attr,
+ &dev_attr_error_location.attr,
+ &dev_attr_error_details.attr,
+ &dev_attr_retry_counter.attr,
+ &dev_attr_reboot_image.attr,
+ &dev_attr_notify.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(rsu);
+
+static int stratix10_rsu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stratix10_rsu_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client.dev = dev;
+ priv->client.receive_cb = NULL;
+ priv->client.priv = priv;
+ priv->status.current_image = 0;
+ priv->status.fail_image = 0;
+ priv->status.error_location = 0;
+ priv->status.error_details = 0;
+ priv->status.version = 0;
+ priv->status.state = 0;
+ priv->retry_counter = INVALID_RETRY_COUNTER;
+
+ mutex_init(&priv->lock);
+ priv->chan = stratix10_svc_request_channel_byname(&priv->client,
+ SVC_CLIENT_RSU);
+ if (IS_ERR(priv->chan)) {
+ dev_err(dev, "couldn't get service channel %s\n",
+ SVC_CLIENT_RSU);
+ return PTR_ERR(priv->chan);
+ }
+
+ init_completion(&priv->completion);
+ platform_set_drvdata(pdev, priv);
+
+ /* get the initial state from firmware */
+ ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
+ 0, rsu_status_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU status %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ return ret;
+ }
+
+ /* only 19.3 or later firmware supports the retry counter feature */
+ if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0,
+ rsu_retry_callback);
+ if (ret) {
+ dev_err(dev,
+ "Error, getting RSU retry %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+ }
+
+ return ret;
+}
+
+static int stratix10_rsu_remove(struct platform_device *pdev)
+{
+ struct stratix10_rsu_priv *priv = platform_get_drvdata(pdev);
+
+ stratix10_svc_free_channel(priv->chan);
+ return 0;
+}
+
+static struct platform_driver stratix10_rsu_driver = {
+ .probe = stratix10_rsu_probe,
+ .remove = stratix10_rsu_remove,
+ .driver = {
+ .name = "stratix10-rsu",
+ .dev_groups = rsu_groups,
+ },
+};
+
+module_platform_driver(stratix10_rsu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Remote System Update Driver");
+MODULE_AUTHOR("Richard Gong <richard.gong@intel.com>");
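The sysfs attributes above reach userspace through the driver core's dev_groups hook rather than explicit sysfs_create_files() calls, so they exist before the device uevent is emitted. As a rough sketch (assuming the standard ATTRIBUTE_GROUPS() helper from <linux/sysfs.h>; not a verbatim expansion), ATTRIBUTE_GROUPS(rsu) generates:

    static const struct attribute_group rsu_group = {
    	.attrs = rsu_attrs,
    };

    static const struct attribute_group *rsu_groups[] = {
    	&rsu_group,
    	NULL,
    };

which is the rsu_groups array the driver assigns to .driver.dev_groups.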
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 6e6514825ad0..b485321189e1 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -38,6 +38,9 @@
#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200
#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30
+/* stratix10 service layer clients */
+#define STRATIX10_RSU "stratix10-rsu"
+
typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long,
@@ -45,6 +48,14 @@ typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
struct stratix10_svc_chan;
/**
+ * struct stratix10_svc - svc private data
+ * @stratix10_svc_rsu: pointer to stratix10 RSU device
+ */
+struct stratix10_svc {
+ struct platform_device *stratix10_svc_rsu;
+};
+
+/**
* struct stratix10_svc_sh_memory - service shared memory structure
* @sync_complete: state for a completion
* @addr: physical address of shared memory block
@@ -296,7 +307,12 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
break;
case COMMAND_RSU_UPDATE:
+ case COMMAND_RSU_NOTIFY:
+ cb_data->status = BIT(SVC_STATUS_RSU_OK);
+ break;
+ case COMMAND_RSU_RETRY:
cb_data->status = BIT(SVC_STATUS_RSU_OK);
+ cb_data->kaddr1 = &res.a1;
break;
default:
pr_warn("it shouldn't happen\n");
@@ -386,6 +402,16 @@ static int svc_normal_to_secure_thread(void *data)
a1 = pdata->arg[0];
a2 = 0;
break;
+ case COMMAND_RSU_NOTIFY:
+ a0 = INTEL_SIP_SMC_RSU_NOTIFY;
+ a1 = pdata->arg[0];
+ a2 = 0;
+ break;
+ case COMMAND_RSU_RETRY:
+ a0 = INTEL_SIP_SMC_RSU_RETRY_COUNTER;
+ a1 = 0;
+ a2 = 0;
+ break;
default:
pr_warn("it shouldn't happen\n");
break;
@@ -438,7 +464,28 @@ static int svc_normal_to_secure_thread(void *data)
pr_debug("%s: STATUS_REJECTED\n", __func__);
break;
case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR:
+ case INTEL_SIP_SMC_RSU_ERROR:
pr_err("%s: STATUS_ERROR\n", __func__);
+ switch (pdata->command) {
+ /* for FPGA mgr */
+ case COMMAND_RECONFIG_DATA_CLAIM:
+ case COMMAND_RECONFIG:
+ case COMMAND_RECONFIG_DATA_SUBMIT:
+ case COMMAND_RECONFIG_STATUS:
+ cbdata->status =
+ BIT(SVC_STATUS_RECONFIG_ERROR);
+ break;
+
+ /* for RSU */
+ case COMMAND_RSU_STATUS:
+ case COMMAND_RSU_UPDATE:
+ case COMMAND_RSU_NOTIFY:
+ case COMMAND_RSU_RETRY:
+ cbdata->status =
+ BIT(SVC_STATUS_RSU_ERROR);
+ break;
+ }
+
- cbdata->status = BIT(SVC_STATUS_RECONFIG_ERROR);
cbdata->kaddr1 = NULL;
cbdata->kaddr2 = NULL;
@@ -530,7 +577,7 @@ static int svc_get_sh_memory(struct platform_device *pdev,
if (!sh_memory->addr || !sh_memory->size) {
dev_err(dev,
- "fails to get shared memory info from secure world\n");
+ "failed to get shared memory info from secure world\n");
return -ENOMEM;
}
@@ -768,7 +815,7 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
"svc_smc_hvc_thread");
if (IS_ERR(chan->ctrl->task)) {
dev_err(chan->ctrl->dev,
- "fails to create svc_smc_hvc_thread\n");
+ "failed to create svc_smc_hvc_thread\n");
kfree(p_data);
return -EINVAL;
}
@@ -913,6 +960,8 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
struct stratix10_svc_chan *chans;
struct gen_pool *genpool;
struct stratix10_svc_sh_memory *sh_memory;
+ struct stratix10_svc *svc;
+
svc_invoke_fn *invoke_fn;
size_t fifo_size;
int ret;
@@ -957,7 +1006,7 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO;
ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
if (ret) {
- dev_err(dev, "fails to allocate FIFO\n");
+ dev_err(dev, "failed to allocate FIFO\n");
return ret;
}
spin_lock_init(&controller->svc_fifo_lock);
@@ -975,6 +1024,24 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
list_add_tail(&controller->node, &svc_ctrl);
platform_set_drvdata(pdev, controller);
+ /* add svc client device(s) */
+ svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return -ENOMEM;
+
+ svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0);
+ if (!svc->stratix10_svc_rsu) {
+ dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU);
+ return -ENOMEM;
+ }
+
+ ret = platform_device_add(svc->stratix10_svc_rsu);
+ if (ret) {
+ platform_device_put(svc->stratix10_svc_rsu);
+ return ret;
+ }
+ dev_set_drvdata(dev, svc);
+
pr_info("Intel Service Layer Driver Initialized\n");
return ret;
@@ -982,8 +1049,11 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
static int stratix10_svc_drv_remove(struct platform_device *pdev)
{
+ struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+ platform_device_unregister(svc->stratix10_svc_rsu);
+
kfifo_free(&ctrl->svc_fifo);
if (ctrl->task) {
kthread_stop(ctrl->task);
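The probe path above spawns the RSU client as a child platform device; the platform bus then matches it by name against the stratix10-rsu driver added earlier in this series. A minimal sketch of the same alloc/add/put pattern, using only standard platform device APIs (the helper name is invented for illustration):

    #include <linux/err.h>
    #include <linux/platform_device.h>

    /* sketch: create a named child device; the platform bus binds it to
     * any registered driver whose .driver.name is "stratix10-rsu"
     */
    static struct platform_device *spawn_rsu_child(void)
    {
    	struct platform_device *pdev;
    	int ret;

    	pdev = platform_device_alloc("stratix10-rsu", 0);
    	if (!pdev)
    		return ERR_PTR(-ENOMEM);

    	ret = platform_device_add(pdev);
    	if (ret) {
    		platform_device_put(pdev);	/* undoes the alloc */
    		return ERR_PTR(ret);
    	}

    	return pdev;	/* torn down later via platform_device_unregister() */
    }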
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index cdd4f73b4869..73c779e920ed 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -46,11 +46,11 @@ config FPGA_MGR_ALTERA_PS_SPI
using the passive serial interface over SPI.
config FPGA_MGR_ALTERA_CVP
- tristate "Altera Arria-V/Cyclone-V/Stratix-V CvP FPGA Manager"
+ tristate "Altera CvP FPGA Manager"
depends on PCI
help
- FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V
- and Arria 10 Altera FPGAs using the CvP interface over PCIe.
+ FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V,
+ Arria 10 and Stratix10 Altera FPGAs using the CvP interface over PCIe.
config FPGA_MGR_ZYNQ_FPGA
tristate "Xilinx Zynq FPGA"
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 312b9371742f..4865b74b00a4 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -39,8 +39,9 @@ obj-$(CONFIG_FPGA_DFL_FME_BRIDGE) += dfl-fme-br.o
obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o
obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o
-dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o
+dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o dfl-fme-error.o
dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
+dfl-afu-objs += dfl-afu-error.o
# Drivers for FPGAs which implement DFL
obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 770915fb97f9..4e0edb60bfba 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -22,10 +22,10 @@
#define TIMEOUT_US 2000 /* CVP STATUS timeout for USERMODE polling */
/* Vendor Specific Extended Capability Registers */
-#define VSE_PCIE_EXT_CAP_ID 0x200
+#define VSE_PCIE_EXT_CAP_ID 0x0
#define VSE_PCIE_EXT_CAP_ID_VAL 0x000b /* 16bit */
-#define VSE_CVP_STATUS 0x21c /* 32bit */
+#define VSE_CVP_STATUS 0x1c /* 32bit */
#define VSE_CVP_STATUS_CFG_RDY BIT(18) /* CVP_CONFIG_READY */
#define VSE_CVP_STATUS_CFG_ERR BIT(19) /* CVP_CONFIG_ERROR */
#define VSE_CVP_STATUS_CVP_EN BIT(20) /* ctrl block is enabling CVP */
@@ -33,41 +33,93 @@
#define VSE_CVP_STATUS_CFG_DONE BIT(23) /* CVP_CONFIG_DONE */
#define VSE_CVP_STATUS_PLD_CLK_IN_USE BIT(24) /* PLD_CLK_IN_USE */
-#define VSE_CVP_MODE_CTRL 0x220 /* 32bit */
+#define VSE_CVP_MODE_CTRL 0x20 /* 32bit */
#define VSE_CVP_MODE_CTRL_CVP_MODE BIT(0) /* CVP (1) or normal mode (0) */
#define VSE_CVP_MODE_CTRL_HIP_CLK_SEL BIT(1) /* PMA (1) or fabric clock (0) */
#define VSE_CVP_MODE_CTRL_NUMCLKS_OFF 8 /* NUMCLKS bits offset */
#define VSE_CVP_MODE_CTRL_NUMCLKS_MASK GENMASK(15, 8)
-#define VSE_CVP_DATA 0x228 /* 32bit */
-#define VSE_CVP_PROG_CTRL 0x22c /* 32bit */
+#define VSE_CVP_DATA 0x28 /* 32bit */
+#define VSE_CVP_PROG_CTRL 0x2c /* 32bit */
#define VSE_CVP_PROG_CTRL_CONFIG BIT(0)
#define VSE_CVP_PROG_CTRL_START_XFER BIT(1)
+#define VSE_CVP_PROG_CTRL_MASK GENMASK(1, 0)
-#define VSE_UNCOR_ERR_STATUS 0x234 /* 32bit */
+#define VSE_UNCOR_ERR_STATUS 0x34 /* 32bit */
#define VSE_UNCOR_ERR_CVP_CFG_ERR BIT(5) /* CVP_CONFIG_ERROR_LATCHED */
+#define V1_VSEC_OFFSET 0x200 /* Vendor Specific Offset V1 */
+/* V2 Defines */
+#define VSE_CVP_TX_CREDITS 0x49 /* 8bit */
+
+#define V2_CREDIT_TIMEOUT_US 20000
+#define V2_CHECK_CREDIT_US 10
+#define V2_POLL_TIMEOUT_US 1000000
+#define V2_USER_TIMEOUT_US 500000
+
+#define V1_POLL_TIMEOUT_US 10
+
#define DRV_NAME "altera-cvp"
#define ALTERA_CVP_MGR_NAME "Altera CvP FPGA Manager"
+/* Write block sizes */
+#define ALTERA_CVP_V1_SIZE 4
+#define ALTERA_CVP_V2_SIZE 4096
+
/* Optional CvP config error status check for debugging */
static bool altera_cvp_chkcfg;
+struct cvp_priv;
+
struct altera_cvp_conf {
struct fpga_manager *mgr;
struct pci_dev *pci_dev;
void __iomem *map;
- void (*write_data)(struct altera_cvp_conf *, u32);
+ void (*write_data)(struct altera_cvp_conf *conf,
+ u32 data);
char mgr_name[64];
u8 numclks;
+ u32 sent_packets;
+ u32 vsec_offset;
+ const struct cvp_priv *priv;
+};
+
+struct cvp_priv {
+ void (*switch_clk)(struct altera_cvp_conf *conf);
+ int (*clear_state)(struct altera_cvp_conf *conf);
+ int (*wait_credit)(struct fpga_manager *mgr, u32 blocks);
+ size_t block_size;
+ int poll_time_us;
+ int user_time_us;
};
+static int altera_read_config_byte(struct altera_cvp_conf *conf,
+ int where, u8 *val)
+{
+ return pci_read_config_byte(conf->pci_dev, conf->vsec_offset + where,
+ val);
+}
+
+static int altera_read_config_dword(struct altera_cvp_conf *conf,
+ int where, u32 *val)
+{
+ return pci_read_config_dword(conf->pci_dev, conf->vsec_offset + where,
+ val);
+}
+
+static int altera_write_config_dword(struct altera_cvp_conf *conf,
+ int where, u32 val)
+{
+ return pci_write_config_dword(conf->pci_dev, conf->vsec_offset + where,
+ val);
+}
+
static enum fpga_mgr_states altera_cvp_state(struct fpga_manager *mgr)
{
struct altera_cvp_conf *conf = mgr->priv;
u32 status;
- pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &status);
+ altera_read_config_dword(conf, VSE_CVP_STATUS, &status);
if (status & VSE_CVP_STATUS_CFG_DONE)
return FPGA_MGR_STATE_OPERATING;
@@ -85,7 +137,8 @@ static void altera_cvp_write_data_iomem(struct altera_cvp_conf *conf, u32 val)
static void altera_cvp_write_data_config(struct altera_cvp_conf *conf, u32 val)
{
- pci_write_config_dword(conf->pci_dev, VSE_CVP_DATA, val);
+ pci_write_config_dword(conf->pci_dev, conf->vsec_offset + VSE_CVP_DATA,
+ val);
}
/* switches between CvP clock and internal clock */
@@ -95,10 +148,10 @@ static void altera_cvp_dummy_write(struct altera_cvp_conf *conf)
u32 val;
/* set 1 CVP clock cycle for every CVP Data Register Write */
- pci_read_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, &val);
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
val |= 1 << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
- pci_write_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
for (i = 0; i < CVP_DUMMY_WR; i++)
conf->write_data(conf, 0); /* dummy data, could be any value */
@@ -115,7 +168,7 @@ static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask,
retries++;
do {
- pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
+ altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
if ((val & status_mask) == status_val)
return 0;
@@ -126,32 +179,136 @@ static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask,
return -ETIMEDOUT;
}
+static int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ u32 val;
+ int ret;
+
+ /* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */
+ ret = altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
+ if (ret || (val & VSE_CVP_STATUS_CFG_ERR)) {
+ dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n",
+ bytes);
+ return -EPROTO;
+ }
+ return 0;
+}
+
+/*
+ * CvP Version2 Functions
+ * Recent Intel FPGAs use a credit mechanism to throttle incoming
+ * bitstreams and a different method of clearing the state.
+ */
+
+static int altera_cvp_v2_clear_state(struct altera_cvp_conf *conf)
+{
+ u32 val;
+ int ret;
+
+ /* Clear the START_XFER and CVP_CONFIG bits */
+ ret = altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
+ if (ret) {
+ dev_err(&conf->pci_dev->dev,
+ "Error reading CVP Program Control Register\n");
+ return ret;
+ }
+
+ val &= ~VSE_CVP_PROG_CTRL_MASK;
+ ret = altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
+ if (ret) {
+ dev_err(&conf->pci_dev->dev,
+ "Error writing CVP Program Control Register\n");
+ return ret;
+ }
+
+ return altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0,
+ conf->priv->poll_time_us);
+}
+
+static int altera_cvp_v2_wait_for_credit(struct fpga_manager *mgr,
+ u32 blocks)
+{
+ u32 timeout = V2_CREDIT_TIMEOUT_US / V2_CHECK_CREDIT_US;
+ struct altera_cvp_conf *conf = mgr->priv;
+ int ret;
+ u8 val;
+
+ do {
+ ret = altera_read_config_byte(conf, VSE_CVP_TX_CREDITS, &val);
+ if (ret) {
+ dev_err(&conf->pci_dev->dev,
+ "Error reading CVP Credit Register\n");
+ return ret;
+ }
+
+ /* Return if there is space in the FIFO: the wrapping u8 difference
+  * between the credit register and sent_packets counts free credits.
+  */
+ if (val - (u8)conf->sent_packets)
+ return 0;
+
+ ret = altera_cvp_chk_error(mgr, blocks * ALTERA_CVP_V2_SIZE);
+ if (ret) {
+ dev_err(&conf->pci_dev->dev,
+ "CE Bit error credit reg[0x%x]:sent[0x%x]\n",
+ val, conf->sent_packets);
+ return -EAGAIN;
+ }
+
+ /* Limit the check credit byte traffic */
+ usleep_range(V2_CHECK_CREDIT_US, V2_CHECK_CREDIT_US + 1);
+ } while (timeout--);
+
+ dev_err(&conf->pci_dev->dev, "Timeout waiting for credit\n");
+ return -ETIMEDOUT;
+}
+
+static int altera_cvp_send_block(struct altera_cvp_conf *conf,
+ const u32 *data, size_t len)
+{
+ u32 mask, words = len / sizeof(u32);
+ int i, remainder;
+
+ for (i = 0; i < words; i++)
+ conf->write_data(conf, *data++);
+
+ /* write up to 3 trailing bytes, if any */
+ remainder = len % sizeof(u32);
+ if (remainder) {
+ mask = BIT(remainder * 8) - 1;
+ if (mask)
+ conf->write_data(conf, *data & mask);
+ }
+
+ return 0;
+}
+
static int altera_cvp_teardown(struct fpga_manager *mgr,
struct fpga_image_info *info)
{
struct altera_cvp_conf *conf = mgr->priv;
- struct pci_dev *pdev = conf->pci_dev;
int ret;
u32 val;
/* STEP 12 - reset START_XFER bit */
- pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
val &= ~VSE_CVP_PROG_CTRL_START_XFER;
- pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
/* STEP 13 - reset CVP_CONFIG bit */
val &= ~VSE_CVP_PROG_CTRL_CONFIG;
- pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
/*
* STEP 14
* - set CVP_NUMCLKS to 1 and then issue CVP_DUMMY_WR dummy
* writes to the HIP
*/
- altera_cvp_dummy_write(conf); /* from CVP clock to internal clock */
+ if (conf->priv->switch_clk)
+ conf->priv->switch_clk(conf);
/* STEP 15 - poll CVP_CONFIG_READY bit for 0 with 10us timeout */
- ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0, 10);
+ ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0,
+ conf->priv->poll_time_us);
if (ret)
dev_err(&mgr->dev, "CFG_RDY == 0 timeout\n");
@@ -163,7 +320,6 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
const char *buf, size_t count)
{
struct altera_cvp_conf *conf = mgr->priv;
- struct pci_dev *pdev = conf->pci_dev;
u32 iflags, val;
int ret;
@@ -183,7 +339,7 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
conf->numclks = 1; /* for uncompressed and unencrypted images */
/* STEP 1 - read CVP status and check CVP_EN flag */
- pci_read_config_dword(pdev, VSE_CVP_STATUS, &val);
+ altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
if (!(val & VSE_CVP_STATUS_CVP_EN)) {
dev_err(&mgr->dev, "CVP mode off: 0x%04x\n", val);
return -ENODEV;
@@ -201,30 +357,42 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
* - set HIP_CLK_SEL and CVP_MODE (must be set in the order mentioned)
*/
/* switch from fabric to PMA clock */
- pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
val |= VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
- pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
/* set CVP mode */
- pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
val |= VSE_CVP_MODE_CTRL_CVP_MODE;
- pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
/*
* STEP 3
* - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
*/
- altera_cvp_dummy_write(conf);
+ if (conf->priv->switch_clk)
+ conf->priv->switch_clk(conf);
+
+ if (conf->priv->clear_state) {
+ ret = conf->priv->clear_state(conf);
+ if (ret) {
+ dev_err(&mgr->dev, "Problem clearing out state\n");
+ return ret;
+ }
+ }
+
+ conf->sent_packets = 0;
/* STEP 4 - set CVP_CONFIG bit */
- pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
/* request control block to begin transfer using CVP */
val |= VSE_CVP_PROG_CTRL_CONFIG;
- pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
- /* STEP 5 - poll CVP_CONFIG READY for 1 with 10us timeout */
+ /* STEP 5 - poll CVP_CONFIG READY for 1 with timeout */
ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY,
- VSE_CVP_STATUS_CFG_RDY, 10);
+ VSE_CVP_STATUS_CFG_RDY,
+ conf->priv->poll_time_us);
if (ret) {
dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
return ret;
@@ -234,33 +402,28 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
* STEP 6
* - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
*/
- altera_cvp_dummy_write(conf);
+ if (conf->priv->switch_clk)
+ conf->priv->switch_clk(conf);
+
+ if (altera_cvp_chkcfg) {
+ ret = altera_cvp_chk_error(mgr, 0);
+ if (ret) {
+ dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
+ return ret;
+ }
+ }
/* STEP 7 - set START_XFER */
- pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
val |= VSE_CVP_PROG_CTRL_START_XFER;
- pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
/* STEP 8 - start transfer (set CVP_NUMCLKS for bitstream) */
- pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
- val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
- val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
- pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
-
- return 0;
-}
-
-static inline int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes)
-{
- struct altera_cvp_conf *conf = mgr->priv;
- u32 val;
-
- /* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */
- pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
- if (val & VSE_CVP_STATUS_CFG_ERR) {
- dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n",
- bytes);
- return -EPROTO;
+ if (conf->priv->switch_clk) {
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
+ val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
+ val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
}
return 0;
}
@@ -269,20 +432,32 @@ static int altera_cvp_write(struct fpga_manager *mgr, const char *buf,
size_t count)
{
struct altera_cvp_conf *conf = mgr->priv;
+ size_t done, remaining, len;
const u32 *data;
- size_t done, remaining;
int status = 0;
- u32 mask;
/* STEP 9 - write 32-bit data from RBF file to CVP data register */
data = (u32 *)buf;
remaining = count;
done = 0;
- while (remaining >= 4) {
- conf->write_data(conf, *data++);
- done += 4;
- remaining -= 4;
+ while (remaining) {
+ /* Use credit throttling if available */
+ if (conf->priv->wait_credit) {
+ status = conf->priv->wait_credit(mgr, done);
+ if (status) {
+ dev_err(&conf->pci_dev->dev,
+ "Wait Credit ERR: 0x%x\n", status);
+ return status;
+ }
+ }
+
+ len = min(conf->priv->block_size, remaining);
+ altera_cvp_send_block(conf, data, len);
+ data += len / sizeof(u32);
+ done += len;
+ remaining -= len;
+ conf->sent_packets++;
/*
* STEP 10 (optional) and STEP 11
@@ -300,11 +475,6 @@ static int altera_cvp_write(struct fpga_manager *mgr, const char *buf,
}
}
- /* write up to 3 trailing bytes, if any */
- mask = BIT(remaining * 8) - 1;
- if (mask)
- conf->write_data(conf, *data & mask);
-
if (altera_cvp_chkcfg)
status = altera_cvp_chk_error(mgr, count);
@@ -315,31 +485,30 @@ static int altera_cvp_write_complete(struct fpga_manager *mgr,
struct fpga_image_info *info)
{
struct altera_cvp_conf *conf = mgr->priv;
- struct pci_dev *pdev = conf->pci_dev;
+ u32 mask, val;
int ret;
- u32 mask;
- u32 val;
ret = altera_cvp_teardown(mgr, info);
if (ret)
return ret;
/* STEP 16 - check CVP_CONFIG_ERROR_LATCHED bit */
- pci_read_config_dword(pdev, VSE_UNCOR_ERR_STATUS, &val);
+ altera_read_config_dword(conf, VSE_UNCOR_ERR_STATUS, &val);
if (val & VSE_UNCOR_ERR_CVP_CFG_ERR) {
dev_err(&mgr->dev, "detected CVP_CONFIG_ERROR_LATCHED!\n");
return -EPROTO;
}
/* STEP 17 - reset CVP_MODE and HIP_CLK_SEL bit */
- pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
val &= ~VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
val &= ~VSE_CVP_MODE_CTRL_CVP_MODE;
- pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
/* STEP 18 - poll PLD_CLK_IN_USE and USER_MODE bits */
mask = VSE_CVP_STATUS_PLD_CLK_IN_USE | VSE_CVP_STATUS_USERMODE;
- ret = altera_cvp_wait_status(conf, mask, mask, TIMEOUT_US);
+ ret = altera_cvp_wait_status(conf, mask, mask,
+ conf->priv->user_time_us);
if (ret)
dev_err(&mgr->dev, "PLD_CLK_IN_USE|USERMODE timeout\n");
@@ -353,6 +522,21 @@ static const struct fpga_manager_ops altera_cvp_ops = {
.write_complete = altera_cvp_write_complete,
};
+static const struct cvp_priv cvp_priv_v1 = {
+ .switch_clk = altera_cvp_dummy_write,
+ .block_size = ALTERA_CVP_V1_SIZE,
+ .poll_time_us = V1_POLL_TIMEOUT_US,
+ .user_time_us = TIMEOUT_US,
+};
+
+static const struct cvp_priv cvp_priv_v2 = {
+ .clear_state = altera_cvp_v2_clear_state,
+ .wait_credit = altera_cvp_v2_wait_for_credit,
+ .block_size = ALTERA_CVP_V2_SIZE,
+ .poll_time_us = V2_POLL_TIMEOUT_US,
+ .user_time_us = V2_USER_TIMEOUT_US,
+};
+
static ssize_t chkcfg_show(struct device_driver *dev, char *buf)
{
return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
@@ -394,22 +578,29 @@ static int altera_cvp_probe(struct pci_dev *pdev,
{
struct altera_cvp_conf *conf;
struct fpga_manager *mgr;
+ int ret, offset;
u16 cmd, val;
u32 regval;
- int ret;
+
+ /* Discover the Vendor Specific Offset for this device */
+ offset = pci_find_next_ext_capability(pdev, 0, PCI_EXT_CAP_ID_VNDR);
+ if (!offset) {
+ dev_err(&pdev->dev, "No Vendor Specific Offset.\n");
+ return -ENODEV;
+ }
/*
* First check if this is the expected FPGA device. PCI config
* space access works without enabling the PCI device, memory
* space access is enabled further down.
*/
- pci_read_config_word(pdev, VSE_PCIE_EXT_CAP_ID, &val);
+ pci_read_config_word(pdev, offset + VSE_PCIE_EXT_CAP_ID, &val);
if (val != VSE_PCIE_EXT_CAP_ID_VAL) {
dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val);
return -ENODEV;
}
- pci_read_config_dword(pdev, VSE_CVP_STATUS, &regval);
+ pci_read_config_dword(pdev, offset + VSE_CVP_STATUS, &regval);
if (!(regval & VSE_CVP_STATUS_CVP_EN)) {
dev_err(&pdev->dev,
"CVP is disabled for this device: CVP_STATUS Reg 0x%x\n",
@@ -421,6 +612,8 @@ static int altera_cvp_probe(struct pci_dev *pdev,
if (!conf)
return -ENOMEM;
+ conf->vsec_offset = offset;
+
/*
* Enable memory BAR access. We cannot use pci_enable_device() here
* because it will make the driver unusable with FPGA devices that
@@ -445,6 +638,11 @@ static int altera_cvp_probe(struct pci_dev *pdev,
conf->pci_dev = pdev;
conf->write_data = altera_cvp_write_data_iomem;
+ if (conf->vsec_offset == V1_VSEC_OFFSET)
+ conf->priv = &cvp_priv_v1;
+ else
+ conf->priv = &cvp_priv_v2;
+
conf->map = pci_iomap(pdev, CVP_BAR, 0);
if (!conf->map) {
dev_warn(&pdev->dev, "Mapping CVP BAR failed\n");
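The credit test in altera_cvp_v2_wait_for_credit() relies on modulo-256 arithmetic: the hardware credit register is only 8 bits wide, so the driver truncates its 32-bit sent_packets counter to u8 before subtracting, and both values wrap in step. A standalone worked sketch of that test (values invented):

    #include <stdbool.h>
    #include <stdint.h>

    /* wrap-safe free-space check, mirroring the v2 credit logic above */
    static bool fifo_has_space(uint8_t credit_reg, uint32_t sent_packets)
    {
    	/* e.g. credit_reg == 5, sent_packets == 258: (uint8_t)258 == 2,
    	 * difference == 3, so three more 4096-byte blocks fit
    	 */
    	return (uint8_t)(credit_reg - (uint8_t)sent_packets) != 0;
    }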
diff --git a/drivers/fpga/altera-pr-ip-core-plat.c b/drivers/fpga/altera-pr-ip-core-plat.c
index b293d83143f1..99b9cc0e70f0 100644
--- a/drivers/fpga/altera-pr-ip-core-plat.c
+++ b/drivers/fpga/altera-pr-ip-core-plat.c
@@ -32,7 +32,9 @@ static int alt_pr_platform_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- return alt_pr_unregister(dev);
+ alt_pr_unregister(dev);
+
+ return 0;
}
static const struct of_device_id alt_pr_of_match[] = {
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
index a7a3bf0b5202..2cf25fd5e897 100644
--- a/drivers/fpga/altera-pr-ip-core.c
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -201,15 +201,13 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
}
EXPORT_SYMBOL_GPL(alt_pr_register);
-int alt_pr_unregister(struct device *dev)
+void alt_pr_unregister(struct device *dev)
{
struct fpga_manager *mgr = dev_get_drvdata(dev);
dev_dbg(dev, "%s\n", __func__);
fpga_mgr_unregister(mgr);
-
- return 0;
}
EXPORT_SYMBOL_GPL(alt_pr_unregister);
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
new file mode 100644
index 000000000000..c1467ae1a6b6
--- /dev/null
+++ b/drivers/fpga/dfl-afu-error.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU) Error Reporting
+ *
+ * Copyright 2019 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@linux.intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Mitchel Henry <henry.mitchel@intel.com>
+ */
+
+#include <linux/uaccess.h>
+
+#include "dfl-afu.h"
+
+#define PORT_ERROR_MASK 0x8
+#define PORT_ERROR 0x10
+#define PORT_FIRST_ERROR 0x18
+#define PORT_MALFORMED_REQ0 0x20
+#define PORT_MALFORMED_REQ1 0x28
+
+#define ERROR_MASK GENMASK_ULL(63, 0)
+
+/* mask or unmask port errors by the error mask register. */
+static void __afu_port_err_mask(struct device *dev, bool mask)
+{
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+ writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
+}
+
+static void afu_port_err_mask(struct device *dev, bool mask)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+
+ mutex_lock(&pdata->lock);
+ __afu_port_err_mask(dev, mask);
+ mutex_unlock(&pdata->lock);
+}
+
+/* clear port errors. */
+static int afu_port_err_clear(struct device *dev, u64 err)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+ void __iomem *base_err, *base_hdr;
+ int ret = -EBUSY;
+ u64 v;
+
+ base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+
+ /*
+ * clear Port Errors
+ *
+ * - Check for AP6 State
+ * - Halt Port by keeping Port in reset
+ * - Set PORT Error mask to all 1 to mask errors
+ * - Clear all errors
+ * - Set Port mask to all 0 to enable errors
+ * - Error registers start capturing new errors
+ * - Enable Port by pulling the port out of reset
+ */
+
+ /* if device is still in AP6 power state, can not clear any error. */
+ v = readq(base_hdr + PORT_HDR_STS);
+ if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) {
+ dev_err(dev, "Could not clear errors, device in AP6 state.\n");
+ goto done;
+ }
+
+ /* Halt Port by keeping Port in reset */
+ ret = __afu_port_disable(pdev);
+ if (ret)
+ goto done;
+
+ /* Mask all errors */
+ __afu_port_err_mask(dev, true);
+
+ /* Clear errors only if the err input matches the current port errors. */
+ v = readq(base_err + PORT_ERROR);
+
+ if (v == err) {
+ writeq(v, base_err + PORT_ERROR);
+
+ v = readq(base_err + PORT_FIRST_ERROR);
+ writeq(v, base_err + PORT_FIRST_ERROR);
+ } else {
+ ret = -EINVAL;
+ }
+
+ /* Clear mask */
+ __afu_port_err_mask(dev, false);
+
+ /* Enable the Port by clearing the reset */
+ __afu_port_enable(pdev);
+
+done:
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
+
+static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 error;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+ mutex_lock(&pdata->lock);
+ error = readq(base + PORT_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)error);
+}
+
+static ssize_t errors_store(struct device *dev, struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ u64 value;
+ int ret;
+
+ if (kstrtou64(buff, 0, &value))
+ return -EINVAL;
+
+ ret = afu_port_err_clear(dev, value);
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(errors);
+
+static ssize_t first_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 error;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+ mutex_lock(&pdata->lock);
+ error = readq(base + PORT_FIRST_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)error);
+}
+static DEVICE_ATTR_RO(first_error);
+
+static ssize_t first_malformed_req_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 req0, req1;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+ mutex_lock(&pdata->lock);
+ req0 = readq(base + PORT_MALFORMED_REQ0);
+ req1 = readq(base + PORT_MALFORMED_REQ1);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%016llx%016llx\n",
+ (unsigned long long)req1, (unsigned long long)req0);
+}
+static DEVICE_ATTR_RO(first_malformed_req);
+
+static struct attribute *port_err_attrs[] = {
+ &dev_attr_errors.attr,
+ &dev_attr_first_error.attr,
+ &dev_attr_first_malformed_req.attr,
+ NULL,
+};
+
+static umode_t port_err_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ /*
+ * sysfs entries are visible only if the related private feature
+ * is enumerated.
+ */
+ if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
+ return 0;
+
+ return attr->mode;
+}
+
+const struct attribute_group port_err_group = {
+ .name = "errors",
+ .attrs = port_err_attrs,
+ .is_visible = port_err_attrs_visible,
+};
+
+static int port_err_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ afu_port_err_mask(&pdev->dev, false);
+
+ return 0;
+}
+
+static void port_err_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ afu_port_err_mask(&pdev->dev, true);
+}
+
+const struct dfl_feature_id port_err_id_table[] = {
+ {.id = PORT_FEATURE_ID_ERROR,},
+ {0,}
+};
+
+const struct dfl_feature_ops port_err_ops = {
+ .init = port_err_init,
+ .uinit = port_err_uinit,
+};
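The errors attribute above implements a read-compare-write clear protocol: afu_port_err_clear() only clears the registers when the value written back matches the live error bits, and returns -EINVAL otherwise. A hedged user-space sketch of that handshake (the sysfs path is an assumption about the resulting layout, not taken from this patch):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /* sketch: clear AFU port errors by echoing the current value back */
    static int clear_port_errors(const char *path /* e.g. .../errors/errors */)
    {
    	char val[32];
    	ssize_t n;
    	int fd = open(path, O_RDWR);

    	if (fd < 0)
    		return -1;

    	n = read(fd, val, sizeof(val) - 1);
    	if (n <= 0 || lseek(fd, 0, SEEK_SET) < 0) {
    		close(fd);
    		return -1;
    	}
    	val[n] = '\0';

    	/* a stale value no longer matching the register yields EINVAL */
    	n = write(fd, val, strlen(val));
    	close(fd);
    	return n < 0 ? -1 : 0;
    }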
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 02baa6a227c0..e4a34dc7947f 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -22,14 +22,17 @@
#include "dfl-afu.h"
/**
- * port_enable - enable a port
+ * __afu_port_enable - enable a port by clearing reset
* @pdev: port platform device.
*
* Enable Port by clear the port soft reset bit, which is set by default.
* The AFU is unable to respond to any MMIO access while in reset.
- * port_enable function should only be used after port_disable function.
+ * __afu_port_enable() should only be used after __afu_port_disable().
+ *
+ * The caller needs to hold the lock for protection.
*/
-static void port_enable(struct platform_device *pdev)
+void __afu_port_enable(struct platform_device *pdev)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
@@ -52,13 +55,14 @@ static void port_enable(struct platform_device *pdev)
#define RST_POLL_TIMEOUT 1000 /* us */
/**
- * port_disable - disable a port
+ * __afu_port_disable - disable a port by holding reset
* @pdev: port platform device.
*
- * Disable Port by setting the port soft reset bit, it puts the port into
- * reset.
+ * Disable Port by setting the port soft reset bit, which puts the port into reset.
+ *
+ * The caller needs to hold the lock for protection.
*/
-static int port_disable(struct platform_device *pdev)
+int __afu_port_disable(struct platform_device *pdev)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
@@ -104,9 +108,9 @@ static int __port_reset(struct platform_device *pdev)
{
int ret;
- ret = port_disable(pdev);
+ ret = __afu_port_disable(pdev);
if (!ret)
- port_enable(pdev);
+ __afu_port_enable(pdev);
return ret;
}
@@ -141,27 +145,267 @@ id_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(id);
-static const struct attribute *port_hdr_attrs[] = {
+static ssize_t
+ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_CTRL);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
+}
+
+static ssize_t
+ltr_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ bool ltr;
+ u64 v;
+
+ if (kstrtobool(buf, &ltr))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_CTRL);
+ v &= ~PORT_CTRL_LATENCY;
+ v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
+ writeq(v, base + PORT_HDR_CTRL);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(ltr);
+
+static ssize_t
+ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
+}
+
+static ssize_t
+ap1_event_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ bool clear;
+
+ if (kstrtobool(buf, &clear) || !clear)
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(ap1_event);
+
+static ssize_t
+ap2_event_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
+}
+
+static ssize_t
+ap2_event_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ bool clear;
+
+ if (kstrtobool(buf, &clear) || !clear)
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(ap2_event);
+
+static ssize_t
+power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
+}
+static DEVICE_ATTR_RO(power_state);
+
+static ssize_t
+userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ u64 userclk_freq_cmd;
+ void __iomem *base;
+
+ if (kstrtou64(buf, 0, &userclk_freq_cmd))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(userclk_freqcmd);
+
+static ssize_t
+userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ u64 userclk_freqcntr_cmd;
+ void __iomem *base;
+
+ if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(userclk_freqcntrcmd);
+
+static ssize_t
+userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ u64 userclk_freqsts;
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
+}
+static DEVICE_ATTR_RO(userclk_freqsts);
+
+static ssize_t
+userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ u64 userclk_freqcntrsts;
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n",
+ (unsigned long long)userclk_freqcntrsts);
+}
+static DEVICE_ATTR_RO(userclk_freqcntrsts);
+
+static struct attribute *port_hdr_attrs[] = {
&dev_attr_id.attr,
+ &dev_attr_ltr.attr,
+ &dev_attr_ap1_event.attr,
+ &dev_attr_ap2_event.attr,
+ &dev_attr_power_state.attr,
+ &dev_attr_userclk_freqcmd.attr,
+ &dev_attr_userclk_freqcntrcmd.attr,
+ &dev_attr_userclk_freqsts.attr,
+ &dev_attr_userclk_freqcntrsts.attr,
NULL,
};
-static int port_hdr_init(struct platform_device *pdev,
- struct dfl_feature *feature)
+static umode_t port_hdr_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
{
- dev_dbg(&pdev->dev, "PORT HDR Init.\n");
+ struct device *dev = kobj_to_dev(kobj);
+ umode_t mode = attr->mode;
+ void __iomem *base;
- port_reset(pdev);
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ if (dfl_feature_revision(base) > 0) {
+ /*
+ * userclk sysfs interfaces are visible only when the port
+ * revision is 0, as hardware with revision >0 doesn't
+ * support them.
+ */
+ if (attr == &dev_attr_userclk_freqcmd.attr ||
+ attr == &dev_attr_userclk_freqcntrcmd.attr ||
+ attr == &dev_attr_userclk_freqsts.attr ||
+ attr == &dev_attr_userclk_freqcntrsts.attr)
+ mode = 0;
+ }
- return sysfs_create_files(&pdev->dev.kobj, port_hdr_attrs);
+ return mode;
}
-static void port_hdr_uinit(struct platform_device *pdev,
- struct dfl_feature *feature)
+static const struct attribute_group port_hdr_group = {
+ .attrs = port_hdr_attrs,
+ .is_visible = port_hdr_attrs_visible,
+};
+
+static int port_hdr_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
{
- dev_dbg(&pdev->dev, "PORT HDR UInit.\n");
+ port_reset(pdev);
- sysfs_remove_files(&pdev->dev.kobj, port_hdr_attrs);
+ return 0;
}
static long
@@ -185,9 +429,13 @@ port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
return ret;
}
+static const struct dfl_feature_id port_hdr_id_table[] = {
+ {.id = PORT_FEATURE_ID_HEADER,},
+ {0,}
+};
+
static const struct dfl_feature_ops port_hdr_ops = {
.init = port_hdr_init,
- .uinit = port_hdr_uinit,
.ioctl = port_hdr_ioctl,
};
@@ -214,52 +462,91 @@ afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(afu_id);
-static const struct attribute *port_afu_attrs[] = {
+static struct attribute *port_afu_attrs[] = {
&dev_attr_afu_id.attr,
NULL
};
+static umode_t port_afu_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ /*
+ * sysfs entries are visible only if the related private feature
+ * is enumerated.
+ */
+ if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group port_afu_group = {
+ .attrs = port_afu_attrs,
+ .is_visible = port_afu_attrs_visible,
+};
+
static int port_afu_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
struct resource *res = &pdev->resource[feature->resource_index];
- int ret;
- dev_dbg(&pdev->dev, "PORT AFU Init.\n");
+ return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ DFL_PORT_REGION_INDEX_AFU,
+ resource_size(res), res->start,
+ DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
+ DFL_PORT_REGION_WRITE);
+}
- ret = afu_mmio_region_add(dev_get_platdata(&pdev->dev),
- DFL_PORT_REGION_INDEX_AFU, resource_size(res),
- res->start, DFL_PORT_REGION_READ |
- DFL_PORT_REGION_WRITE | DFL_PORT_REGION_MMAP);
- if (ret)
- return ret;
+static const struct dfl_feature_id port_afu_id_table[] = {
+ {.id = PORT_FEATURE_ID_AFU,},
+ {0,}
+};
- return sysfs_create_files(&pdev->dev.kobj, port_afu_attrs);
-}
+static const struct dfl_feature_ops port_afu_ops = {
+ .init = port_afu_init,
+};
-static void port_afu_uinit(struct platform_device *pdev,
- struct dfl_feature *feature)
+static int port_stp_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
{
- dev_dbg(&pdev->dev, "PORT AFU UInit.\n");
+ struct resource *res = &pdev->resource[feature->resource_index];
- sysfs_remove_files(&pdev->dev.kobj, port_afu_attrs);
+ return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ DFL_PORT_REGION_INDEX_STP,
+ resource_size(res), res->start,
+ DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
+ DFL_PORT_REGION_WRITE);
}
-static const struct dfl_feature_ops port_afu_ops = {
- .init = port_afu_init,
- .uinit = port_afu_uinit,
+static const struct dfl_feature_id port_stp_id_table[] = {
+ {.id = PORT_FEATURE_ID_STP,},
+ {0,}
+};
+
+static const struct dfl_feature_ops port_stp_ops = {
+ .init = port_stp_init,
};
static struct dfl_feature_driver port_feature_drvs[] = {
{
- .id = PORT_FEATURE_ID_HEADER,
+ .id_table = port_hdr_id_table,
.ops = &port_hdr_ops,
},
{
- .id = PORT_FEATURE_ID_AFU,
+ .id_table = port_afu_id_table,
.ops = &port_afu_ops,
},
{
+ .id_table = port_err_id_table,
+ .ops = &port_err_ops,
+ },
+ {
+ .id_table = port_stp_id_table,
+ .ops = &port_stp_ops,
+ },
+ {
.ops = NULL,
}
};
@@ -545,9 +832,9 @@ static int port_enable_set(struct platform_device *pdev, bool enable)
mutex_lock(&pdata->lock);
if (enable)
- port_enable(pdev);
+ __afu_port_enable(pdev);
else
- ret = port_disable(pdev);
+ ret = __afu_port_disable(pdev);
mutex_unlock(&pdata->lock);
return ret;
@@ -599,9 +886,17 @@ static int afu_remove(struct platform_device *pdev)
return 0;
}
+static const struct attribute_group *afu_dev_groups[] = {
+ &port_hdr_group,
+ &port_afu_group,
+ &port_err_group,
+ NULL
+};
+
static struct platform_driver afu_driver = {
.driver = {
- .name = DFL_FPGA_FEATURE_DEV_PORT,
+ .name = DFL_FPGA_FEATURE_DEV_PORT,
+ .dev_groups = afu_dev_groups,
},
.probe = afu_probe,
.remove = afu_remove,
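The conversion above moves the port attributes from sysfs_create_files() calls in the feature init/uinit hooks to static attribute groups registered through .dev_groups, with is_visible() deciding per attribute at device_add() time; that removes the window in which udev could see the device before its attributes existed. A minimal self-contained sketch of the is_visible() contract (all demo_* names are hypothetical):

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static struct attribute *demo_attrs[] = {	/* hypothetical attributes */
    	NULL,
    };

    /* return 0 to hide an attribute, or its default mode to expose it */
    static umode_t demo_attrs_visible(struct kobject *kobj,
    				  struct attribute *attr, int n)
    {
    	struct device *dev = kobj_to_dev(kobj);

    	if (!dev_get_platdata(dev))	/* stand-in for a real feature check */
    		return 0;

    	return attr->mode;
    }

    static const struct attribute_group demo_group = {
    	.attrs = demo_attrs,
    	.is_visible = demo_attrs_visible,
    };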
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
index 0c7630ae3cda..576e94960086 100644
--- a/drivers/fpga/dfl-afu.h
+++ b/drivers/fpga/dfl-afu.h
@@ -79,6 +79,10 @@ struct dfl_afu {
struct dfl_feature_platform_data *pdata;
};
+/* hold pdata->lock when calling __afu_port_enable/disable */
+void __afu_port_enable(struct platform_device *pdev);
+int __afu_port_disable(struct platform_device *pdev);
+
void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
u32 region_index, u64 region_size, u64 phys, u32 flags);
@@ -97,4 +101,9 @@ int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
struct dfl_afu_dma_region *
afu_dma_region_find(struct dfl_feature_platform_data *pdata,
u64 iova, u64 size);
+
+extern const struct dfl_feature_ops port_err_ops;
+extern const struct dfl_feature_id port_err_id_table[];
+extern const struct attribute_group port_err_group;
+
#endif /* __DFL_AFU_H */
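The leading double underscores on __afu_port_enable()/__afu_port_disable() mark them as unlocked helpers: as the comment above says, callers are expected to hold pdata->lock, as port_enable_set() in dfl-afu-main.c does. A sketch of the expected calling pattern, assuming the dfl-afu.h declarations above (the helper name is invented):

    /* sketch: take pdata->lock around the unlocked port helpers */
    static int demo_port_reset_locked(struct platform_device *pdev)
    {
    	struct dfl_feature_platform_data *pdata =
    		dev_get_platdata(&pdev->dev);
    	int ret;

    	mutex_lock(&pdata->lock);
    	ret = __afu_port_disable(pdev);
    	if (!ret)
    		__afu_port_enable(pdev);
    	mutex_unlock(&pdata->lock);

    	return ret;
    }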
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
new file mode 100644
index 000000000000..f897d414b923
--- /dev/null
+++ b/drivers/fpga/dfl-fme-error.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine Error Management
+ *
+ * Copyright 2019 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Mitchel, Henry <henry.mitchel@intel.com>
+ */
+
+#include <linux/uaccess.h>
+
+#include "dfl.h"
+#include "dfl-fme.h"
+
+#define FME_ERROR_MASK 0x8
+#define FME_ERROR 0x10
+#define MBP_ERROR BIT_ULL(6)
+#define PCIE0_ERROR_MASK 0x18
+#define PCIE0_ERROR 0x20
+#define PCIE1_ERROR_MASK 0x28
+#define PCIE1_ERROR 0x30
+#define FME_FIRST_ERROR 0x38
+#define FME_NEXT_ERROR 0x40
+#define RAS_NONFAT_ERROR_MASK 0x48
+#define RAS_NONFAT_ERROR 0x50
+#define RAS_CATFAT_ERROR_MASK 0x58
+#define RAS_CATFAT_ERROR 0x60
+#define RAS_ERROR_INJECT 0x68
+#define INJECT_ERROR_MASK GENMASK_ULL(2, 0)
+
+#define ERROR_MASK GENMASK_ULL(63, 0)
+
+static ssize_t pcie0_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + PCIE0_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+
+static ssize_t pcie0_errors_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ int ret = 0;
+ u64 v, val;
+
+ if (kstrtou64(buf, 0, &val))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);
+
+ v = readq(base + PCIE0_ERROR);
+ if (val == v)
+ writeq(v, base + PCIE0_ERROR);
+ else
+ ret = -EINVAL;
+
+ writeq(0ULL, base + PCIE0_ERROR_MASK);
+ mutex_unlock(&pdata->lock);
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(pcie0_errors);
+
+static ssize_t pcie1_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + PCIE1_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+
+static ssize_t pcie1_errors_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ int ret = 0;
+ u64 v, val;
+
+ if (kstrtou64(buf, 0, &val))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);
+
+ v = readq(base + PCIE1_ERROR);
+ if (val == v)
+ writeq(v, base + PCIE1_ERROR);
+ else
+ ret = -EINVAL;
+
+ writeq(0ULL, base + PCIE1_ERROR_MASK);
+ mutex_unlock(&pdata->lock);
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(pcie1_errors);
+
+static ssize_t nonfatal_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ return sprintf(buf, "0x%llx\n",
+ (unsigned long long)readq(base + RAS_NONFAT_ERROR));
+}
+static DEVICE_ATTR_RO(nonfatal_errors);
+
+static ssize_t catfatal_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ return sprintf(buf, "0x%llx\n",
+ (unsigned long long)readq(base + RAS_CATFAT_ERROR));
+}
+static DEVICE_ATTR_RO(catfatal_errors);
+
+static ssize_t inject_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + RAS_ERROR_INJECT);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n",
+ (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
+}
+
+static ssize_t inject_errors_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u8 inject_error;
+ u64 v;
+
+ if (kstrtou8(buf, 0, &inject_error))
+ return -EINVAL;
+
+ if (inject_error & ~INJECT_ERROR_MASK)
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + RAS_ERROR_INJECT);
+ v &= ~INJECT_ERROR_MASK;
+ v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
+ writeq(v, base + RAS_ERROR_INJECT);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_errors);
+
+static ssize_t fme_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + FME_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+
+static ssize_t fme_errors_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v, val;
+ int ret = 0;
+
+ if (kstrtou64(buf, 0, &val))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);
+
+ v = readq(base + FME_ERROR);
+ if (val == v)
+ writeq(v, base + FME_ERROR);
+ else
+ ret = -EINVAL;
+
+ /* Workaround: disable MBP_ERROR if feature revision is 0 */
+ writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
+ base + FME_ERROR_MASK);
+ mutex_unlock(&pdata->lock);
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(fme_errors);
+
+static ssize_t first_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + FME_FIRST_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+static DEVICE_ATTR_RO(first_error);
+
+static ssize_t next_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + FME_NEXT_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+static DEVICE_ATTR_RO(next_error);
+
+static struct attribute *fme_global_err_attrs[] = {
+ &dev_attr_pcie0_errors.attr,
+ &dev_attr_pcie1_errors.attr,
+ &dev_attr_nonfatal_errors.attr,
+ &dev_attr_catfatal_errors.attr,
+ &dev_attr_inject_errors.attr,
+ &dev_attr_fme_errors.attr,
+ &dev_attr_first_error.attr,
+ &dev_attr_next_error.attr,
+ NULL,
+};
+
+static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ /*
+ * sysfs entries are visible only if the related private feature
+ * is enumerated.
+ */
+ if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
+ return 0;
+
+ return attr->mode;
+}
+
+const struct attribute_group fme_global_err_group = {
+ .name = "errors",
+ .attrs = fme_global_err_attrs,
+ .is_visible = fme_global_err_attrs_visible,
+};
+
+static void fme_err_mask(struct device *dev, bool mask)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+
+ /* Workaround: keep MBP_ERROR always masked if revision is 0 */
+ if (dfl_feature_revision(base))
+ writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK);
+ else
+ writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK);
+
+ writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK);
+ writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK);
+ writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
+ writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);
+
+ mutex_unlock(&pdata->lock);
+}
+
+static int fme_global_err_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ fme_err_mask(&pdev->dev, false);
+
+ return 0;
+}
+
+static void fme_global_err_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ fme_err_mask(&pdev->dev, true);
+}
+
+const struct dfl_feature_id fme_global_err_id_table[] = {
+ {.id = FME_FEATURE_ID_GLOBAL_ERR,},
+ {0,}
+};
+
+const struct dfl_feature_ops fme_global_err_ops = {
+ .init = fme_global_err_init,
+ .uinit = fme_global_err_uinit,
+};
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 086ad2420ade..4d78e182878f 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/uaccess.h>
#include <linux/fpga-dfl.h>
#include "dfl.h"
@@ -72,50 +73,126 @@ static ssize_t bitstream_metadata_show(struct device *dev,
}
static DEVICE_ATTR_RO(bitstream_metadata);
-static const struct attribute *fme_hdr_attrs[] = {
+static ssize_t cache_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_CAP);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(FME_CAP_CACHE_SIZE, v));
+}
+static DEVICE_ATTR_RO(cache_size);
+
+static ssize_t fabric_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_CAP);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(FME_CAP_FABRIC_VERID, v));
+}
+static DEVICE_ATTR_RO(fabric_version);
+
+static ssize_t socket_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_CAP);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(FME_CAP_SOCKET_ID, v));
+}
+static DEVICE_ATTR_RO(socket_id);
+
+static struct attribute *fme_hdr_attrs[] = {
&dev_attr_ports_num.attr,
&dev_attr_bitstream_id.attr,
&dev_attr_bitstream_metadata.attr,
+ &dev_attr_cache_size.attr,
+ &dev_attr_fabric_version.attr,
+ &dev_attr_socket_id.attr,
NULL,
};
-static int fme_hdr_init(struct platform_device *pdev,
- struct dfl_feature *feature)
+static const struct attribute_group fme_hdr_group = {
+ .attrs = fme_hdr_attrs,
+};
+
+static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
+ unsigned long arg)
{
- void __iomem *base = feature->ioaddr;
- int ret;
+ struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ int port_id;
- dev_dbg(&pdev->dev, "FME HDR Init.\n");
- dev_dbg(&pdev->dev, "FME cap %llx.\n",
- (unsigned long long)readq(base + FME_HDR_CAP));
+ if (get_user(port_id, (int __user *)arg))
+ return -EFAULT;
- ret = sysfs_create_files(&pdev->dev.kobj, fme_hdr_attrs);
- if (ret)
- return ret;
+ return dfl_fpga_cdev_release_port(cdev, port_id);
+}
- return 0;
+static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
+ unsigned long arg)
+{
+ struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ int port_id;
+
+ if (get_user(port_id, (int __user *)arg))
+ return -EFAULT;
+
+ return dfl_fpga_cdev_assign_port(cdev, port_id);
}
-static void fme_hdr_uinit(struct platform_device *pdev,
- struct dfl_feature *feature)
+static long fme_hdr_ioctl(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
{
- dev_dbg(&pdev->dev, "FME HDR UInit.\n");
- sysfs_remove_files(&pdev->dev.kobj, fme_hdr_attrs);
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ switch (cmd) {
+ case DFL_FPGA_FME_PORT_RELEASE:
+ return fme_hdr_ioctl_release_port(pdata, arg);
+ case DFL_FPGA_FME_PORT_ASSIGN:
+ return fme_hdr_ioctl_assign_port(pdata, arg);
+ }
+
+ return -ENODEV;
}
+static const struct dfl_feature_id fme_hdr_id_table[] = {
+ {.id = FME_FEATURE_ID_HEADER,},
+ {0,}
+};
+
static const struct dfl_feature_ops fme_hdr_ops = {
- .init = fme_hdr_init,
- .uinit = fme_hdr_uinit,
+ .ioctl = fme_hdr_ioctl,
};
static struct dfl_feature_driver fme_feature_drvs[] = {
{
- .id = FME_FEATURE_ID_HEADER,
+ .id_table = fme_hdr_id_table,
.ops = &fme_hdr_ops,
},
{
- .id = FME_FEATURE_ID_PR_MGMT,
- .ops = &pr_mgmt_ops,
+ .id_table = fme_pr_mgmt_id_table,
+ .ops = &fme_pr_mgmt_ops,
+ },
+ {
+ .id_table = fme_global_err_id_table,
+ .ops = &fme_global_err_ops,
},
{
.ops = NULL,
@@ -263,9 +340,16 @@ static int fme_remove(struct platform_device *pdev)
return 0;
}
+static const struct attribute_group *fme_dev_groups[] = {
+ &fme_hdr_group,
+ &fme_global_err_group,
+ NULL
+};
+
static struct platform_driver fme_driver = {
.driver = {
- .name = DFL_FPGA_FEATURE_DEV_FME,
+ .name = DFL_FPGA_FEATURE_DEV_FME,
+ .dev_groups = fme_dev_groups,
},
.probe = fme_probe,
.remove = fme_remove,
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index 3c71dc3faaf5..a233a53db708 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -470,7 +470,12 @@ static long fme_pr_ioctl(struct platform_device *pdev,
return ret;
}
-const struct dfl_feature_ops pr_mgmt_ops = {
+const struct dfl_feature_id fme_pr_mgmt_id_table[] = {
+ {.id = FME_FEATURE_ID_PR_MGMT,},
+ {0}
+};
+
+const struct dfl_feature_ops fme_pr_mgmt_ops = {
.init = pr_mgmt_init,
.uinit = pr_mgmt_uinit,
.ioctl = fme_pr_ioctl,
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
index 5394a216c5c0..6685c8ef965b 100644
--- a/drivers/fpga/dfl-fme.h
+++ b/drivers/fpga/dfl-fme.h
@@ -33,6 +33,10 @@ struct dfl_fme {
struct dfl_feature_platform_data *pdata;
};
-extern const struct dfl_feature_ops pr_mgmt_ops;
+extern const struct dfl_feature_ops fme_pr_mgmt_ops;
+extern const struct dfl_feature_id fme_pr_mgmt_id_table[];
+extern const struct dfl_feature_ops fme_global_err_ops;
+extern const struct dfl_feature_id fme_global_err_id_table[];
+extern const struct attribute_group fme_global_err_group;
#endif /* __DFL_FME_H */
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 66b5720582bb..89ca292236ad 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -223,8 +223,43 @@ disable_error_report_exit:
return ret;
}
+static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
+{
+ struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+ struct dfl_fpga_cdev *cdev = drvdata->cdev;
+ int ret = 0;
+
+ if (!num_vfs) {
+ /*
+ * disable SRIOV and then put the released ports back into the
+ * default PF access mode.
+ */
+ pci_disable_sriov(pcidev);
+
+ dfl_fpga_cdev_config_ports_pf(cdev);
+
+ } else {
+ /*
+ * before enabling SRIOV, put the released ports into VF access
+ * mode first.
+ */
+ ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
+ if (ret)
+ return ret;
+
+ ret = pci_enable_sriov(pcidev, num_vfs);
+ if (ret)
+ dfl_fpga_cdev_config_ports_pf(cdev);
+ }
+
+ return ret;
+}
+
static void cci_pci_remove(struct pci_dev *pcidev)
{
+ if (dev_is_pf(&pcidev->dev))
+ cci_pci_sriov_configure(pcidev, 0);
+
cci_remove_feature_devs(pcidev);
pci_disable_pcie_error_reporting(pcidev);
}
@@ -234,6 +269,7 @@ static struct pci_driver cci_pci_driver = {
.id_table = cci_pcie_id_tbl,
.probe = cci_pci_probe,
.remove = cci_pci_remove,
+ .sriov_configure = cci_pci_sriov_configure,
};
module_pci_driver(cci_pci_driver);
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 4b66aaa32b5a..96a2b8274a33 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -231,16 +231,20 @@ EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
*/
int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
{
- struct dfl_fpga_port_ops *port_ops = dfl_fpga_port_ops_get(pdev);
- int port_id;
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fpga_port_ops *port_ops;
+
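+ /* use the cached id if the port id has already been resolved */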
+ if (pdata->id != FEATURE_DEV_ID_UNUSED)
+ return pdata->id == *(int *)pport_id;
+ port_ops = dfl_fpga_port_ops_get(pdev);
if (!port_ops || !port_ops->get_id)
return 0;
- port_id = port_ops->get_id(pdev);
+ pdata->id = port_ops->get_id(pdev);
dfl_fpga_port_ops_put(port_ops);
- return port_id == *(int *)pport_id;
+ return pdata->id == *(int *)pport_id;
}
EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
@@ -255,7 +259,8 @@ void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
dfl_fpga_dev_for_each_feature(pdata, feature)
if (feature->ops) {
- feature->ops->uinit(pdev, feature);
+ if (feature->ops->uinit)
+ feature->ops->uinit(pdev, feature);
feature->ops = NULL;
}
}
@@ -266,17 +271,34 @@ static int dfl_feature_instance_init(struct platform_device *pdev,
struct dfl_feature *feature,
struct dfl_feature_driver *drv)
{
- int ret;
+ int ret = 0;
- ret = drv->ops->init(pdev, feature);
- if (ret)
- return ret;
+ if (drv->ops->init) {
+ ret = drv->ops->init(pdev, feature);
+ if (ret)
+ return ret;
+ }
feature->ops = drv->ops;
return ret;
}
+static bool dfl_feature_drv_match(struct dfl_feature *feature,
+ struct dfl_feature_driver *driver)
+{
+ const struct dfl_feature_id *ids = driver->id_table;
+
+ if (ids) {
+ while (ids->id) {
+ if (ids->id == feature->id)
+ return true;
+ ids++;
+ }
+ }
+ return false;
+}
+
/**
* dfl_fpga_dev_feature_init - init for sub features of dfl feature device
* @pdev: feature device.
@@ -297,8 +319,7 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev,
while (drv->ops) {
dfl_fpga_dev_for_each_feature(pdata, feature) {
- /* match feature and drv using id */
- if (feature->id == drv->id) {
+ if (dfl_feature_drv_match(feature, drv)) {
ret = dfl_feature_instance_init(pdev, pdata,
feature, drv);
if (ret)
@@ -474,6 +495,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
pdata->dev = fdev;
pdata->num = binfo->feature_num;
pdata->dfl_cdev = binfo->cdev;
+ pdata->id = FEATURE_DEV_ID_UNUSED;
mutex_init(&pdata->lock);
lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
dfl_pdata_key_strings[type]);
@@ -973,25 +995,27 @@ void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
struct dfl_feature_platform_data *pdata, *ptmp;
- remove_feature_devs(cdev);
-
mutex_lock(&cdev->lock);
- if (cdev->fme_dev) {
- /* the fme should be unregistered. */
- WARN_ON(device_is_registered(cdev->fme_dev));
+ if (cdev->fme_dev)
put_device(cdev->fme_dev);
- }
list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
struct platform_device *port_dev = pdata->dev;
- /* the port should be unregistered. */
- WARN_ON(device_is_registered(&port_dev->dev));
+ /* remove released ports */
+ if (!device_is_registered(&port_dev->dev)) {
+ dfl_id_free(feature_dev_id_type(port_dev),
+ port_dev->id);
+ platform_device_put(port_dev);
+ }
+
list_del(&pdata->node);
put_device(&port_dev->dev);
}
mutex_unlock(&cdev->lock);
+ remove_feature_devs(cdev);
+
fpga_region_unregister(cdev->region);
devm_kfree(cdev->parent, cdev);
}
@@ -1042,6 +1066,170 @@ static int __init dfl_fpga_init(void)
return ret;
}
+/**
+ * dfl_fpga_cdev_release_port - release a port platform device
+ *
+ * @cdev: parent container device.
+ * @port_id: id of the port platform device.
+ *
+ * This function allows the user to release a port platform device. This is
+ * a mandatory step before turning a port from PF into VF for SRIOV support.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
+{
+ struct platform_device *port_pdev;
+ int ret = -ENODEV;
+
+ mutex_lock(&cdev->lock);
+ port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!port_pdev)
+ goto unlock_exit;
+
+ if (!device_is_registered(&port_pdev->dev)) {
+ ret = -EBUSY;
+ goto put_dev_exit;
+ }
+
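+ /* fails with -EBUSY if the port device is already in use */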
+ ret = dfl_feature_dev_use_begin(dev_get_platdata(&port_pdev->dev));
+ if (ret)
+ goto put_dev_exit;
+
+ platform_device_del(port_pdev);
+ cdev->released_port_num++;
+put_dev_exit:
+ put_device(&port_pdev->dev);
+unlock_exit:
+ mutex_unlock(&cdev->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
+
+/**
+ * dfl_fpga_cdev_assign_port - assign a port platform device back
+ *
+ * @cdev: parent container device.
+ * @port_id: id of the port platform device.
+ *
+ * This function allows the user to assign a port platform device back. This
+ * is a mandatory step after disabling SRIOV support.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
+{
+ struct platform_device *port_pdev;
+ int ret = -ENODEV;
+
+ mutex_lock(&cdev->lock);
+ port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!port_pdev)
+ goto unlock_exit;
+
+ if (device_is_registered(&port_pdev->dev)) {
+ ret = -EBUSY;
+ goto put_dev_exit;
+ }
+
+ ret = platform_device_add(port_pdev);
+ if (ret)
+ goto put_dev_exit;
+
+ dfl_feature_dev_use_end(dev_get_platdata(&port_pdev->dev));
+ cdev->released_port_num--;
+put_dev_exit:
+ put_device(&port_pdev->dev);
+unlock_exit:
+ mutex_unlock(&cdev->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
+
+static void config_port_access_mode(struct device *fme_dev, int port_id,
+ bool is_vf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_PORT_OFST(port_id));
+
+ v &= ~FME_PORT_OFST_ACC_CTRL;
+ v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
+ is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);
+
+ writeq(v, base + FME_HDR_PORT_OFST(port_id));
+}
+
+#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
+#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)
+
+/**
+ * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
+ *
+ * @cdev: parent container device.
+ *
+ * This function is needed in the SRIOV configuration routine. It is used to
+ * configure all released ports from VF access mode back to PF.
+ */
+void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
+{
+ struct dfl_feature_platform_data *pdata;
+
+ mutex_lock(&cdev->lock);
+ list_for_each_entry(pdata, &cdev->port_dev_list, node) {
+ if (device_is_registered(&pdata->dev->dev))
+ continue;
+
+ config_port_pf_mode(cdev->fme_dev, pdata->id);
+ }
+ mutex_unlock(&cdev->lock);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
+
+/**
+ * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
+ *
+ * @cdev: parent container device.
+ * @num_vfs: number of VF devices.
+ *
+ * This function is needed in the SRIOV configuration routine. It is used to
+ * configure the released ports from PF access mode to VF.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
+{
+ struct dfl_feature_platform_data *pdata;
+ int ret = 0;
+
+ mutex_lock(&cdev->lock);
+ /*
+ * can't turn multiple ports into one VF device; each VF is backed by
+ * exactly one port, so reject the request with -EINVAL if the number
+ * of released ports doesn't match the requested number of VFs.
+ */
+ if (cdev->released_port_num != num_vfs) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ list_for_each_entry(pdata, &cdev->port_dev_list, node) {
+ if (device_is_registered(&pdata->dev->dev))
+ continue;
+
+ config_port_vf_mode(cdev->fme_dev, pdata->id);
+ }
+done:
+ mutex_unlock(&cdev->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
+
static void __exit dfl_fpga_exit(void)
{
dfl_chardev_uinit();
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index a8b869e9e5b7..9f0e656de720 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -30,8 +30,8 @@
/* plus one for fme device */
#define MAX_DFL_FEATURE_DEV_NUM (MAX_DFL_FPGA_PORT_NUM + 1)
-/* Reserved 0x0 for Header Group Register and 0xff for AFU */
-#define FEATURE_ID_FIU_HEADER 0x0
+/* Reserved 0xfe for Header Group Register and 0xff for AFU */
+#define FEATURE_ID_FIU_HEADER 0xfe
#define FEATURE_ID_AFU 0xff
#define FME_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
@@ -119,6 +119,11 @@
#define PORT_HDR_NEXT_AFU NEXT_AFU
#define PORT_HDR_CAP 0x30
#define PORT_HDR_CTRL 0x38
+#define PORT_HDR_STS 0x40
+#define PORT_HDR_USRCLK_CMD0 0x50
+#define PORT_HDR_USRCLK_CMD1 0x58
+#define PORT_HDR_USRCLK_STS0 0x60
+#define PORT_HDR_USRCLK_STS1 0x68
/* Port Capability Register Bitfield */
#define PORT_CAP_PORT_NUM GENMASK_ULL(1, 0) /* ID of this port */
@@ -130,6 +135,16 @@
/* Latency tolerance reporting. '1' >= 40us, '0' < 40us.*/
#define PORT_CTRL_LATENCY BIT_ULL(2)
#define PORT_CTRL_SFTRST_ACK BIT_ULL(4) /* HW ack for reset */
+
+/* Port Status Register Bitfield */
+#define PORT_STS_AP2_EVT BIT_ULL(13) /* AP2 event detected */
+#define PORT_STS_AP1_EVT BIT_ULL(12) /* AP1 event detected */
+#define PORT_STS_PWR_STATE GENMASK_ULL(11, 8) /* AFU power states */
+#define PORT_STS_PWR_STATE_NORM 0
+#define PORT_STS_PWR_STATE_AP1 1 /* 50% throttling */
+#define PORT_STS_PWR_STATE_AP2 2 /* 90% throttling */
+#define PORT_STS_PWR_STATE_AP6 6 /* 100% throttling */
+
/**
* struct dfl_fpga_port_ops - port ops
*
@@ -154,13 +169,22 @@ void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);
int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
/**
- * struct dfl_feature_driver - sub feature's driver
+ * struct dfl_feature_id - dfl private feature id
*
- * @id: sub feature id.
- * @ops: ops of this sub feature.
+ * @id: unique dfl private feature id.
*/
-struct dfl_feature_driver {
+struct dfl_feature_id {
u64 id;
+};
+
+/**
+ * struct dfl_feature_driver - dfl private feature driver
+ *
+ * @id_table: id_table for dfl private features supported by this driver.
+ * @ops: ops of this dfl private feature driver.
+ */
+struct dfl_feature_driver {
+ const struct dfl_feature_id *id_table;
const struct dfl_feature_ops *ops;
};
@@ -183,6 +207,8 @@ struct dfl_feature {
#define DEV_STATUS_IN_USE 0
+#define FEATURE_DEV_ID_UNUSED (-1)
+
/**
* struct dfl_feature_platform_data - platform data for feature devices
*
@@ -191,6 +217,7 @@ struct dfl_feature {
* @cdev: cdev of feature dev.
* @dev: ptr to platform device linked with this platform data.
* @dfl_cdev: ptr to container device.
+ * @id: id used for this feature device.
* @disable_count: count for port disable.
* @num: number for sub features.
* @dev_status: dev status (e.g. DEV_STATUS_IN_USE).
@@ -203,6 +230,7 @@ struct dfl_feature_platform_data {
struct cdev cdev;
struct platform_device *dev;
struct dfl_fpga_cdev *dfl_cdev;
+ int id;
unsigned int disable_count;
unsigned long dev_status;
void *private;
@@ -331,6 +359,11 @@ static inline bool dfl_feature_is_port(void __iomem *base)
(FIELD_GET(DFH_ID, v) == DFH_ID_FIU_PORT);
}
+static inline u8 dfl_feature_revision(void __iomem *base)
+{
+ return (u8)FIELD_GET(DFH_REVISION, readq(base + DFH));
+}
+
/**
* struct dfl_fpga_enum_info - DFL FPGA enumeration information
*
@@ -373,6 +406,7 @@ void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info);
* @fme_dev: FME feature device under this container device.
* @lock: mutex lock to protect the port device list.
* @port_dev_list: list of all port feature devices under this container device.
+ * @released_port_num: number of released ports under this container device.
*/
struct dfl_fpga_cdev {
struct device *parent;
@@ -380,6 +414,7 @@ struct dfl_fpga_cdev {
struct device *fme_dev;
struct mutex lock;
struct list_head port_dev_list;
+ int released_port_num;
};
struct dfl_fpga_cdev *
@@ -407,4 +442,9 @@ dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
return pdev;
}
+
+int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id);
+int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id);
+void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev);
+int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vf);
#endif /* __FPGA_DFL_H */
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 2463aa7ab4f6..96544b348c27 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -646,24 +646,23 @@ static int debug_remove(struct amba_device *adev)
return 0;
}
+static const struct amba_cs_uci_id uci_id_debug[] = {
+ {
+ /* CPU Debug UCI data */
+ .devarch = 0x47706a15,
+ .devarch_mask = 0xfff0ffff,
+ .devtype = 0x00000015,
+ }
+};
+
static const struct amba_id debug_ids[] = {
- { /* Debug for Cortex-A53 */
- .id = 0x000bbd03,
- .mask = 0x000fffff,
- },
- { /* Debug for Cortex-A57 */
- .id = 0x000bbd07,
- .mask = 0x000fffff,
- },
- { /* Debug for Cortex-A72 */
- .id = 0x000bbd08,
- .mask = 0x000fffff,
- },
- { /* Debug for Cortex-A73 */
- .id = 0x000bbd09,
- .mask = 0x000fffff,
- },
- { 0, 0 },
+ CS_AMBA_ID(0x000bbd03), /* Cortex-A53 */
+ CS_AMBA_ID(0x000bbd07), /* Cortex-A57 */
+ CS_AMBA_ID(0x000bbd08), /* Cortex-A72 */
+ CS_AMBA_ID(0x000bbd09), /* Cortex-A73 */
+ CS_AMBA_UCI_ID(0x000f0205, uci_id_debug), /* Qualcomm Kryo */
+ CS_AMBA_UCI_ID(0x000f0211, uci_id_debug), /* Qualcomm Kryo */
+ {},
};
static struct amba_driver debug_driver = {
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index a0365e23678e..219c10eb752c 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -296,11 +296,8 @@ static ssize_t mode_store(struct device *dev,
spin_lock(&drvdata->spinlock);
config->mode = val & ETMv4_MODE_ALL;
-
- if (config->mode & ETM_MODE_EXCLUDE)
- etm4_set_mode_exclude(drvdata, true);
- else
- etm4_set_mode_exclude(drvdata, false);
+ etm4_set_mode_exclude(drvdata,
+ config->mode & ETM_MODE_EXCLUDE ? true : false);
if (drvdata->instrp0 == true) {
/* start by clearing instruction P0 field */
@@ -999,10 +996,8 @@ static ssize_t addr_range_store(struct device *dev,
* Program include or exclude control bits for vinst or vdata
* whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
*/
- if (config->mode & ETM_MODE_EXCLUDE)
- etm4_set_mode_exclude(drvdata, true);
- else
- etm4_set_mode_exclude(drvdata, false);
+ etm4_set_mode_exclude(drvdata,
+ config->mode & ETM_MODE_EXCLUDE ? true : false);
spin_unlock(&drvdata->spinlock);
return size;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 7bcac8896fc1..a128b5063f46 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -34,7 +34,8 @@
#include "coresight-etm-perf.h"
static int boot_enable;
-module_param_named(boot_enable, boot_enable, int, S_IRUGO);
+module_param(boot_enable, int, 0444);
+MODULE_PARM_DESC(boot_enable, "Enable tracing on boot");
/* The number of ETMv4 currently registered */
static int etm4_count;
@@ -47,7 +48,7 @@ static enum cpuhp_state hp_online;
static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
{
- /* Writing any value to ETMOSLAR unlocks the trace registers */
+ /* Writing 0 to TRCOSLAR unlocks the trace registers */
writel_relaxed(0x0, drvdata->base + TRCOSLAR);
drvdata->os_unlock = true;
isb();
@@ -188,6 +189,13 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
+ /*
+ * As recommended by section 4.3.7 ("Synchronization when using the
+ * memory-mapped interface") of ARM IHI 0064D
+ */
+ dsb(sy);
+ isb();
+
done:
CS_LOCK(drvdata->base);
@@ -453,8 +461,12 @@ static void etm4_disable_hw(void *info)
/* EN, bit[0] Trace unit enable bit */
control &= ~0x1;
- /* make sure everything completes before disabling */
- mb();
+ /*
+ * Make sure everything completes before disabling, as recommended
+ * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
+ * SSTATUS") of ARM IHI 0064D
+ */
+ dsb(sy);
isb();
writel_relaxed(control, drvdata->base + TRCPRGCTLR);
@@ -1047,10 +1059,8 @@ static int etm4_starting_cpu(unsigned int cpu)
return 0;
spin_lock(&etmdrvdata[cpu]->spinlock);
- if (!etmdrvdata[cpu]->os_unlock) {
+ if (!etmdrvdata[cpu]->os_unlock)
etm4_os_unlock(etmdrvdata[cpu]);
- etmdrvdata[cpu]->os_unlock = true;
- }
if (local_read(&etmdrvdata[cpu]->mode))
etm4_enable_hw(etmdrvdata[cpu]);
@@ -1192,11 +1202,15 @@ static struct amba_cs_uci_id uci_id_etm4[] = {
};
static const struct amba_id etm4_ids[] = {
- CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */
- CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */
- CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
- CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
- CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4), /* Cortex-A35 */
+ CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */
+ CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */
+ CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
+ CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
+ CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */
+ CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
+ CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
+ CS_AMBA_ID(0x000bb802), /* Qualcomm Kryo 385 Cortex-A55 */
+ CS_AMBA_ID(0x000bb803), /* Qualcomm Kryo 385 Cortex-A75 */
{},
};
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index fa97cb9ab4f9..05f7896c3a01 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -5,6 +5,7 @@
* Description: CoreSight Funnel driver
*/
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -192,7 +193,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
if (is_of_node(dev_fwnode(dev)) &&
of_device_is_compatible(dev->of_node, "arm,coresight-funnel"))
- pr_warn_once("Uses OBSOLETE CoreSight funnel binding\n");
+ dev_warn_once(dev, "Uses OBSOLETE CoreSight funnel binding\n");
desc.name = coresight_alloc_device_name(&funnel_devs, dev);
if (!desc.name)
@@ -302,11 +303,19 @@ static const struct of_device_id static_funnel_match[] = {
{}
};
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id static_funnel_ids[] = {
+ {"ARMHC9FE", 0},
+ {},
+};
+#endif
+
static struct platform_driver static_funnel_driver = {
.probe = static_funnel_probe,
.driver = {
.name = "coresight-static-funnel",
.of_match_table = static_funnel_match,
+ .acpi_match_table = ACPI_PTR(static_funnel_ids),
.pm = &funnel_dev_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 61d7f9ff054d..82e563cdc879 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -185,11 +185,11 @@ static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
}
/* coresight AMBA ID, full UCI structure: id table entry. */
-#define CS_AMBA_UCI_ID(pid, uci_ptr) \
- { \
- .id = pid, \
- .mask = 0x000fffff, \
- .data = uci_ptr \
+#define CS_AMBA_UCI_ID(pid, uci_ptr) \
+ { \
+ .id = pid, \
+ .mask = 0x000fffff, \
+ .data = (void *)uci_ptr \
}
/* extract the data value from a UCI structure given amba_id pointer. */
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index b7d6d59d56db..b29ba640eb25 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -184,7 +184,8 @@ static int replicator_probe(struct device *dev, struct resource *res)
if (is_of_node(dev_fwnode(dev)) &&
of_device_is_compatible(dev->of_node, "arm,coresight-replicator"))
- pr_warn_once("Uses OBSOLETE CoreSight replicator binding\n");
+ dev_warn_once(dev,
+ "Uses OBSOLETE CoreSight replicator binding\n");
desc.name = coresight_alloc_device_name(&replicator_devs, dev);
if (!desc.name)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 23b7ff00af5c..807416b75ecc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -479,30 +479,11 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
* traces.
*/
if (!buf->snapshot && to_read > handle->size) {
- u32 mask = 0;
-
- /*
- * The value written to RRP must be byte-address aligned to
- * the width of the trace memory databus _and_ to a frame
- * boundary (16 byte), whichever is the biggest. For example,
- * for 32-bit, 64-bit and 128-bit wide trace memory, the four
- * LSBs must be 0s. For 256-bit wide trace memory, the five
- * LSBs must be 0s.
- */
- switch (drvdata->memwidth) {
- case TMC_MEM_INTF_WIDTH_32BITS:
- case TMC_MEM_INTF_WIDTH_64BITS:
- case TMC_MEM_INTF_WIDTH_128BITS:
- mask = GENMASK(31, 4);
- break;
- case TMC_MEM_INTF_WIDTH_256BITS:
- mask = GENMASK(31, 5);
- break;
- }
+ u32 mask = tmc_get_memwidth_mask(drvdata);
/*
* Make sure the new size is aligned in accordance with the
- * requirement explained above.
+ * requirement explained in function tmc_get_memwidth_mask().
*/
to_read = handle->size & mask;
/* Move the RAM read pointer up */
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 17006705287a..625882bc8b08 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -871,6 +871,7 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
return ERR_PTR(rc);
}
+ refcount_set(&etr_buf->refcount, 1);
dev_dbg(dev, "allocated buffer of size %ldKB in mode %d\n",
(unsigned long)size >> 10, etr_buf->mode);
return etr_buf;
@@ -927,15 +928,24 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
rrp = tmc_read_rrp(drvdata);
rwp = tmc_read_rwp(drvdata);
status = readl_relaxed(drvdata->base + TMC_STS);
+
+ /*
+ * If there were memory errors in the session, truncate the
+ * buffer.
+ */
+ if (WARN_ON_ONCE(status & TMC_STS_MEMERR)) {
+ dev_dbg(&drvdata->csdev->dev,
+ "tmc memory error detected, truncating buffer\n");
+ etr_buf->len = 0;
+ etr_buf->full = 0;
+ return;
+ }
+
etr_buf->full = status & TMC_STS_FULL;
WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
etr_buf->ops->sync(etr_buf, rrp, rwp);
-
- /* Insert barrier packets at the beginning, if there was an overflow */
- if (etr_buf->full)
- tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
}
static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
@@ -1072,6 +1082,13 @@ static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
drvdata->sysfs_buf = NULL;
} else {
tmc_sync_etr_buf(drvdata);
+ /*
+ * Insert barrier packets at the beginning, if there was
+ * an overflow.
+ */
+ if (etr_buf->full)
+ tmc_etr_buf_insert_barrier_packet(etr_buf,
+ etr_buf->offset);
}
}
@@ -1263,8 +1280,6 @@ retry:
if (IS_ERR(etr_buf))
return etr_buf;
- refcount_set(&etr_buf->refcount, 1);
-
/* Now that we have a buffer, add it to the IDR. */
mutex_lock(&drvdata->idr_mutex);
ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL);
@@ -1291,19 +1306,11 @@ get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata,
struct perf_event *event, int nr_pages,
void **pages, bool snapshot)
{
- struct etr_buf *etr_buf;
-
/*
* In per-thread mode the etr_buf isn't shared, so just go ahead
* with memory allocation.
*/
- etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
- if (IS_ERR(etr_buf))
- goto out;
-
- refcount_set(&etr_buf->refcount, 1);
-out:
- return etr_buf;
+ return alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
}
static struct etr_buf *
@@ -1410,10 +1417,12 @@ free_etr_perf_buffer:
* tmc_etr_sync_perf_buffer: Copy the actual trace data from the hardware
* buffer to the perf ring buffer.
*/
-static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
+static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf,
+ unsigned long src_offset,
+ unsigned long to_copy)
{
- long bytes, to_copy;
- long pg_idx, pg_offset, src_offset;
+ long bytes;
+ long pg_idx, pg_offset;
unsigned long head = etr_perf->head;
char **dst_pages, *src_buf;
struct etr_buf *etr_buf = etr_perf->etr_buf;
@@ -1422,8 +1431,6 @@ static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
pg_idx = head >> PAGE_SHIFT;
pg_offset = head & (PAGE_SIZE - 1);
dst_pages = (char **)etr_perf->pages;
- src_offset = etr_buf->offset;
- to_copy = etr_buf->len;
while (to_copy > 0) {
/*
@@ -1434,6 +1441,8 @@ static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
* 3) what is available in the destination page.
* in one iteration.
*/
+ if (src_offset >= etr_buf->size)
+ src_offset -= etr_buf->size;
bytes = tmc_etr_buf_get_data(etr_buf, src_offset, to_copy,
&src_buf);
if (WARN_ON_ONCE(bytes <= 0))
@@ -1454,8 +1463,6 @@ static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
/* Move source pointers */
src_offset += bytes;
- if (src_offset >= etr_buf->size)
- src_offset -= etr_buf->size;
}
}
@@ -1471,7 +1478,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
void *config)
{
bool lost = false;
- unsigned long flags, size = 0;
+ unsigned long flags, offset, size = 0;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct etr_perf_buffer *etr_perf = config;
struct etr_buf *etr_buf = etr_perf->etr_buf;
@@ -1484,7 +1491,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
goto out;
}
- if (WARN_ON(drvdata->perf_data != etr_perf)) {
+ if (WARN_ON(drvdata->perf_buf != etr_buf)) {
lost = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
goto out;
@@ -1496,12 +1503,38 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
tmc_sync_etr_buf(drvdata);
CS_LOCK(drvdata->base);
- /* Reset perf specific data */
- drvdata->perf_data = NULL;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ lost = etr_buf->full;
+ offset = etr_buf->offset;
size = etr_buf->len;
- tmc_etr_sync_perf_buffer(etr_perf);
+
+ /*
+ * The ETR buffer may be bigger than the space available in the
+ * perf ring buffer (handle->size). If so, advance the offset so that we
+ * get the latest trace data. In snapshot mode none of that matters
+ * since we are expected to clobber stale data in favour of the latest
+ * traces.
+ */
+ if (!etr_perf->snapshot && size > handle->size) {
+ u32 mask = tmc_get_memwidth_mask(drvdata);
+
+ /*
+ * Make sure the new size is aligned in accordance with the
+ * requirement explained in function tmc_get_memwidth_mask().
+ */
+ size = handle->size & mask;
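+ /* advance the start so only the newest 'size' bytes get copied */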
+ offset = etr_buf->offset + etr_buf->len - size;
+
+ if (offset >= etr_buf->size)
+ offset -= etr_buf->size;
+ lost = true;
+ }
+
+ /* Insert barrier packets at the beginning, if there was an overflow */
+ if (lost)
+ tmc_etr_buf_insert_barrier_packet(etr_buf, offset);
+ tmc_etr_sync_perf_buffer(etr_perf, offset, size);
/*
* In snapshot mode we simply increment the head by the number of byte
@@ -1511,8 +1544,6 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
*/
if (etr_perf->snapshot)
handle->head += size;
-
- lost |= etr_buf->full;
out:
/*
* Don't set the TRUNCATED flag in snapshot mode because 1) the
@@ -1556,7 +1587,6 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
}
etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf);
- drvdata->perf_data = etr_perf;
/*
* No HW configuration is needed if the sink is already in
@@ -1572,6 +1602,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
/* Associate with monitored process. */
drvdata->pid = pid;
drvdata->mode = CS_MODE_PERF;
+ drvdata->perf_buf = etr_perf->etr_buf;
atomic_inc(csdev->refcnt);
}
@@ -1617,6 +1648,8 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
/* Dissociate from monitored process. */
drvdata->pid = -1;
drvdata->mode = CS_MODE_DISABLED;
+ /* Reset perf specific data */
+ drvdata->perf_buf = NULL;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index be37aff573b4..1cf82fa58289 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -70,6 +70,34 @@ void tmc_disable_hw(struct tmc_drvdata *drvdata)
writel_relaxed(0x0, drvdata->base + TMC_CTL);
}
+u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
+{
+ u32 mask = 0;
+
+ /*
+ * When moving RRP or an offset address forward, the new values must
+ * be byte-address aligned to the width of the trace memory databus
+ * _and_ to a frame boundary (16 byte), whichever is the biggest. For
+ * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
+ * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
+ * be 0s.
+ */
+ switch (drvdata->memwidth) {
+ case TMC_MEM_INTF_WIDTH_32BITS:
+ /* fallthrough */
+ case TMC_MEM_INTF_WIDTH_64BITS:
+ /* fallthrough */
+ case TMC_MEM_INTF_WIDTH_128BITS:
+ mask = GENMASK(31, 4);
+ break;
+ case TMC_MEM_INTF_WIDTH_256BITS:
+ mask = GENMASK(31, 5);
+ break;
+ }
+
+ return mask;
+}
+
static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
int ret = 0;
@@ -236,6 +264,7 @@ coresight_tmc_reg(ffcr, TMC_FFCR);
coresight_tmc_reg(mode, TMC_MODE);
coresight_tmc_reg(pscr, TMC_PSCR);
coresight_tmc_reg(axictl, TMC_AXICTL);
+coresight_tmc_reg(authstatus, TMC_AUTHSTATUS);
coresight_tmc_reg(devid, CORESIGHT_DEVID);
coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
@@ -255,6 +284,7 @@ static struct attribute *coresight_tmc_mgmt_attrs[] = {
&dev_attr_devid.attr,
&dev_attr_dba.attr,
&dev_attr_axictl.attr,
+ &dev_attr_authstatus.attr,
NULL,
};
@@ -342,6 +372,13 @@ static inline bool tmc_etr_can_use_sg(struct device *dev)
return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
}
+static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
+{
+ u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);
+
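+ /* NSID == 0b11: non-secure invasive debug implemented and enabled */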
+ return (auth & TMC_AUTH_NSID_MASK) == 0x3;
+}
+
/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
{
@@ -349,6 +386,9 @@ static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
u32 dma_mask = 0;
struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
+ if (!tmc_etr_has_non_secure_access(drvdata))
+ return -EACCES;
+
/* Set the unadvertised capabilities */
tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 1ed50411cc3c..71de978575f3 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -39,6 +39,7 @@
#define TMC_ITATBCTR2 0xef0
#define TMC_ITATBCTR1 0xef4
#define TMC_ITATBCTR0 0xef8
+#define TMC_AUTHSTATUS 0xfb8
/* register description */
/* TMC_CTL - 0x020 */
@@ -47,6 +48,7 @@
#define TMC_STS_TMCREADY_BIT 2
#define TMC_STS_FULL BIT(0)
#define TMC_STS_TRIGGERED BIT(1)
+#define TMC_STS_MEMERR BIT(5)
/*
* TMC_AXICTL - 0x110
*
@@ -89,6 +91,8 @@
#define TMC_DEVID_AXIAW_SHIFT 17
#define TMC_DEVID_AXIAW_MASK 0x7f
+#define TMC_AUTH_NSID_MASK GENMASK(1, 0)
+
enum tmc_config_type {
TMC_CONFIG_TYPE_ETB,
TMC_CONFIG_TYPE_ETR,
@@ -178,8 +182,8 @@ struct etr_buf {
* device configuration register (DEVID)
* @idr: Holds etr_bufs allocated for this ETR.
* @idr_mutex: Access serialisation for idr.
- * @perf_data: PERF buffer for ETR.
- * @sysfs_data: SYSFS buffer for ETR.
+ * @sysfs_buf: SYSFS buffer for ETR.
+ * @perf_buf: PERF buffer for ETR.
*/
struct tmc_drvdata {
void __iomem *base;
@@ -202,7 +206,7 @@ struct tmc_drvdata {
struct idr idr;
struct mutex idr_mutex;
struct etr_buf *sysfs_buf;
- void *perf_data;
+ struct etr_buf *perf_buf;
};
struct etr_buf_operations {
@@ -251,6 +255,7 @@ void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
void tmc_enable_hw(struct tmc_drvdata *drvdata);
void tmc_disable_hw(struct tmc_drvdata *drvdata);
+u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata);
/* ETB/ETF functions */
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata);
diff --git a/drivers/hwtracing/intel_th/Makefile b/drivers/hwtracing/intel_th/Makefile
index d9252fa8d9ca..b63eb8f309ad 100644
--- a/drivers/hwtracing/intel_th/Makefile
+++ b/drivers/hwtracing/intel_th/Makefile
@@ -20,3 +20,6 @@ intel_th_msu-y := msu.o
obj-$(CONFIG_INTEL_TH_PTI) += intel_th_pti.o
intel_th_pti-y := pti.o
+
+obj-$(CONFIG_INTEL_TH_MSU) += intel_th_msu_sink.o
+intel_th_msu_sink-y := msu-sink.o
diff --git a/drivers/hwtracing/intel_th/msu-sink.c b/drivers/hwtracing/intel_th/msu-sink.c
new file mode 100644
index 000000000000..2c7f5116be12
--- /dev/null
+++ b/drivers/hwtracing/intel_th/msu-sink.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * An example software sink buffer for Intel TH MSU.
+ *
+ * Copyright (C) 2019 Intel Corporation.
+ */
+
+#include <linux/intel_th.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#define MAX_SGTS 16
+
+struct msu_sink_private {
+ struct device *dev;
+ struct sg_table **sgts;
+ unsigned int nr_sgts;
+};
+
+static void *msu_sink_assign(struct device *dev, int *mode)
+{
+ struct msu_sink_private *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ priv->sgts = kcalloc(MAX_SGTS, sizeof(void *), GFP_KERNEL);
+ if (!priv->sgts) {
+ kfree(priv);
+ return NULL;
+ }
+
+ priv->dev = dev;
+ *mode = MSC_MODE_MULTI;
+
+ return priv;
+}
+
+static void msu_sink_unassign(void *data)
+{
+ struct msu_sink_private *priv = data;
+
+ kfree(priv->sgts);
+ kfree(priv);
+}
+
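+ /* the id_table is zero-terminated; scan it for the feature's id */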
+/* See also: msc.c: __msc_buffer_win_alloc() */
+static int msu_sink_alloc_window(void *data, struct sg_table **sgt, size_t size)
+{
+ struct msu_sink_private *priv = data;
+ unsigned int nents;
+ struct scatterlist *sg_ptr;
+ void *block;
+ int ret, i;
+
+ if (priv->nr_sgts == MAX_SGTS)
+ return -ENOMEM;
+
+ nents = DIV_ROUND_UP(size, PAGE_SIZE);
+
+ ret = sg_alloc_table(*sgt, nents, GFP_KERNEL);
+ if (ret)
+ return -ENOMEM;
+
+ priv->sgts[priv->nr_sgts++] = *sgt;
+
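+ /* back each scatterlist entry with one PAGE_SIZE DMA-coherent block */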
+ for_each_sg((*sgt)->sgl, sg_ptr, nents, i) {
+ block = dma_alloc_coherent(priv->dev->parent->parent,
+ PAGE_SIZE, &sg_dma_address(sg_ptr),
+ GFP_KERNEL);
+ sg_set_buf(sg_ptr, block, PAGE_SIZE);
+ }
+
+ return nents;
+}
+
+/* See also: msc.c: __msc_buffer_win_free() */
+static void msu_sink_free_window(void *data, struct sg_table *sgt)
+{
+ struct msu_sink_private *priv = data;
+ struct scatterlist *sg_ptr;
+ int i;
+
+ for_each_sg(sgt->sgl, sg_ptr, sgt->nents, i) {
+ dma_free_coherent(priv->dev->parent->parent, PAGE_SIZE,
+ sg_virt(sg_ptr), sg_dma_address(sg_ptr));
+ }
+
+ sg_free_table(sgt);
+ priv->nr_sgts--;
+}
+
+static int msu_sink_ready(void *data, struct sg_table *sgt, size_t bytes)
+{
+ struct msu_sink_private *priv = data;
+
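+ /* the example sink doesn't consume the data; just hand the window back */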
+ intel_th_msc_window_unlock(priv->dev, sgt);
+
+ return 0;
+}
+
+static const struct msu_buffer sink_mbuf = {
+ .name = "sink",
+ .assign = msu_sink_assign,
+ .unassign = msu_sink_unassign,
+ .alloc_window = msu_sink_alloc_window,
+ .free_window = msu_sink_free_window,
+ .ready = msu_sink_ready,
+};
+
+module_intel_th_msu_buffer(sink_mbuf);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 8ab28e5fb366..fc9f15f36ad4 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -17,21 +17,48 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
+#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
+#include <linux/intel_th.h>
#include "intel_th.h"
#include "msu.h"
#define msc_dev(x) (&(x)->thdev->dev)
+/*
+ * Lockout state transitions:
+ * READY -> INUSE -+-> LOCKED -+-> READY -> etc.
+ * \-----------/
+ * WIN_READY: window can be used by HW
+ * WIN_INUSE: window is in use
+ * WIN_LOCKED: window is filled up and is being processed by the buffer
+ * handling code
+ *
+ * All state transitions happen automatically, except for LOCKED->READY,
+ * which the buffer handling code must signal by calling
+ * intel_th_msc_window_unlock().
+ *
+ * When the interrupt handler has to switch to the next window, it checks
+ * whether it's READY, and if it is, it performs the switch and tracing
+ * continues. If it's LOCKED, it stops the trace.
+ */
+enum lockout_state {
+ WIN_READY = 0,
+ WIN_INUSE,
+ WIN_LOCKED
+};
+
/**
* struct msc_window - multiblock mode window descriptor
* @entry: window list linkage (msc::win_list)
* @pgoff: page offset into the buffer that this window starts at
+ * @lockout: lockout state, see comment above
+ * @lo_lock: lockout state serialization
* @nr_blocks: number of blocks (pages) in this window
* @nr_segs: number of segments in this window (<= @nr_blocks)
* @_sgt: array of block descriptors
@@ -40,6 +67,8 @@
struct msc_window {
struct list_head entry;
unsigned long pgoff;
+ enum lockout_state lockout;
+ spinlock_t lo_lock;
unsigned int nr_blocks;
unsigned int nr_segs;
struct msc *msc;
@@ -66,8 +95,8 @@ struct msc_iter {
struct msc_window *start_win;
struct msc_window *win;
unsigned long offset;
- int start_block;
- int block;
+ struct scatterlist *start_block;
+ struct scatterlist *block;
unsigned int block_off;
unsigned int wrap_count;
unsigned int eof;
@@ -77,6 +106,8 @@ struct msc_iter {
* struct msc - MSC device representation
* @reg_base: register window base address
* @thdev: intel_th_device pointer
+ * @mbuf: MSU buffer, if assigned
+ * @mbuf_priv: MSU buffer's private data, if @mbuf
* @win_list: list of windows in multiblock mode
* @single_sgt: single mode buffer
* @cur_win: current window
@@ -100,6 +131,10 @@ struct msc {
void __iomem *msu_base;
struct intel_th_device *thdev;
+ const struct msu_buffer *mbuf;
+ void *mbuf_priv;
+
+ struct work_struct work;
struct list_head win_list;
struct sg_table single_sgt;
struct msc_window *cur_win;
@@ -108,6 +143,8 @@ struct msc {
unsigned int single_wrap : 1;
void *base;
dma_addr_t base_addr;
+ u32 orig_addr;
+ u32 orig_sz;
/* <0: no buffer, 0: no users, >0: active users */
atomic_t user_count;
@@ -126,6 +163,101 @@ struct msc {
unsigned int index;
};
+static LIST_HEAD(msu_buffer_list);
+static DEFINE_MUTEX(msu_buffer_mutex);
+
+/**
+ * struct msu_buffer_entry - internal MSU buffer bookkeeping
+ * @entry: link to msu_buffer_list
+ * @mbuf: MSU buffer object
+ * @owner: module that provides this MSU buffer
+ */
+struct msu_buffer_entry {
+ struct list_head entry;
+ const struct msu_buffer *mbuf;
+ struct module *owner;
+};
+
+static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
+{
+ struct msu_buffer_entry *mbe;
+
+ lockdep_assert_held(&msu_buffer_mutex);
+
+ list_for_each_entry(mbe, &msu_buffer_list, entry) {
+ if (!strcmp(mbe->mbuf->name, name))
+ return mbe;
+ }
+
+ return NULL;
+}
+
+static const struct msu_buffer *
+msu_buffer_get(const char *name)
+{
+ struct msu_buffer_entry *mbe;
+
+ mutex_lock(&msu_buffer_mutex);
+ mbe = __msu_buffer_entry_find(name);
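+ /* pin the providing module for as long as the buffer is referenced */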
+ if (mbe && !try_module_get(mbe->owner))
+ mbe = NULL;
+ mutex_unlock(&msu_buffer_mutex);
+
+ return mbe ? mbe->mbuf : NULL;
+}
+
+static void msu_buffer_put(const struct msu_buffer *mbuf)
+{
+ struct msu_buffer_entry *mbe;
+
+ mutex_lock(&msu_buffer_mutex);
+ mbe = __msu_buffer_entry_find(mbuf->name);
+ if (mbe)
+ module_put(mbe->owner);
+ mutex_unlock(&msu_buffer_mutex);
+}
+
+int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
+ struct module *owner)
+{
+ struct msu_buffer_entry *mbe;
+ int ret = 0;
+
+ mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
+ if (!mbe)
+ return -ENOMEM;
+
+ mutex_lock(&msu_buffer_mutex);
+ if (__msu_buffer_entry_find(mbuf->name)) {
+ ret = -EEXIST;
+ kfree(mbe);
+ goto unlock;
+ }
+
+ mbe->mbuf = mbuf;
+ mbe->owner = owner;
+ list_add_tail(&mbe->entry, &msu_buffer_list);
+unlock:
+ mutex_unlock(&msu_buffer_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);
+
+void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
+{
+ struct msu_buffer_entry *mbe;
+
+ mutex_lock(&msu_buffer_mutex);
+ mbe = __msu_buffer_entry_find(mbuf->name);
+ if (mbe) {
+ list_del(&mbe->entry);
+ kfree(mbe);
+ }
+ mutex_unlock(&msu_buffer_mutex);
+}
+EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);
+
static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
/* header hasn't been written */
@@ -139,28 +271,25 @@ static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
return false;
}
-static inline struct msc_block_desc *
-msc_win_block(struct msc_window *win, unsigned int block)
+static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
{
- return sg_virt(&win->sgt->sgl[block]);
+ return win->sgt->sgl;
}
-static inline size_t
-msc_win_actual_bsz(struct msc_window *win, unsigned int block)
+static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
{
- return win->sgt->sgl[block].length;
+ return sg_virt(msc_win_base_sg(win));
}
-static inline dma_addr_t
-msc_win_baddr(struct msc_window *win, unsigned int block)
+static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
{
- return sg_dma_address(&win->sgt->sgl[block]);
+ return sg_dma_address(msc_win_base_sg(win));
}
static inline unsigned long
-msc_win_bpfn(struct msc_window *win, unsigned int block)
+msc_win_base_pfn(struct msc_window *win)
{
- return msc_win_baddr(win, block) >> PAGE_SHIFT;
+ return PFN_DOWN(msc_win_base_dma(win));
}
/**
@@ -188,6 +317,26 @@ static struct msc_window *msc_next_window(struct msc_window *win)
return list_next_entry(win, entry);
}
+static size_t msc_win_total_sz(struct msc_window *win)
+{
+ struct scatterlist *sg;
+ unsigned int blk;
+ size_t size = 0;
+
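+ /* a wrapped window is full; otherwise sum up to the last written block */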
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct msc_block_desc *bdesc = sg_virt(sg);
+
+ if (msc_block_wrapped(bdesc))
+ return win->nr_blocks << PAGE_SHIFT;
+
+ size += msc_total_sz(bdesc);
+ if (msc_block_last_written(bdesc))
+ break;
+ }
+
+ return size;
+}
+
/**
* msc_find_window() - find a window matching a given sg_table
* @msc: MSC device
@@ -216,7 +365,7 @@ msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
found++;
/* skip the empty ones */
- if (nonempty && msc_block_is_empty(msc_win_block(win, 0)))
+ if (nonempty && msc_block_is_empty(msc_win_base(win)))
continue;
if (found)
@@ -250,44 +399,38 @@ static struct msc_window *msc_oldest_window(struct msc *msc)
}
/**
- * msc_win_oldest_block() - locate the oldest block in a given window
+ * msc_win_oldest_sg() - locate the oldest block in a given window
* @win: window to look at
*
- * Return: index of the block with the oldest data
+ * Return: the scatterlist holding the oldest data
*/
-static unsigned int msc_win_oldest_block(struct msc_window *win)
+static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
{
unsigned int blk;
- struct msc_block_desc *bdesc = msc_win_block(win, 0);
+ struct scatterlist *sg;
+ struct msc_block_desc *bdesc = msc_win_base(win);
/* without wrapping, first block is the oldest */
if (!msc_block_wrapped(bdesc))
- return 0;
+ return msc_win_base_sg(win);
/*
* with wrapping, last written block contains both the newest and the
* oldest data for this window.
*/
- for (blk = 0; blk < win->nr_segs; blk++) {
- bdesc = msc_win_block(win, blk);
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct msc_block_desc *bdesc = sg_virt(sg);
if (msc_block_last_written(bdesc))
- return blk;
+ return sg;
}
- return 0;
+ return msc_win_base_sg(win);
}
static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
- return msc_win_block(iter->win, iter->block);
-}
-
-static void msc_iter_init(struct msc_iter *iter)
-{
- memset(iter, 0, sizeof(*iter));
- iter->start_block = -1;
- iter->block = -1;
+ return sg_virt(iter->block);
}
static struct msc_iter *msc_iter_install(struct msc *msc)
@@ -312,7 +455,6 @@ static struct msc_iter *msc_iter_install(struct msc *msc)
goto unlock;
}
- msc_iter_init(iter);
iter->msc = msc;
list_add_tail(&iter->entry, &msc->iter_list);
@@ -333,10 +475,10 @@ static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
static void msc_iter_block_start(struct msc_iter *iter)
{
- if (iter->start_block != -1)
+ if (iter->start_block)
return;
- iter->start_block = msc_win_oldest_block(iter->win);
+ iter->start_block = msc_win_oldest_sg(iter->win);
iter->block = iter->start_block;
iter->wrap_count = 0;
@@ -360,7 +502,7 @@ static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
return -EINVAL;
iter->win = iter->start_win;
- iter->start_block = -1;
+ iter->start_block = NULL;
msc_iter_block_start(iter);
@@ -370,7 +512,7 @@ static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
static int msc_iter_win_advance(struct msc_iter *iter)
{
iter->win = msc_next_window(iter->win);
- iter->start_block = -1;
+ iter->start_block = NULL;
if (iter->win == iter->start_win) {
iter->eof++;
@@ -400,8 +542,10 @@ static int msc_iter_block_advance(struct msc_iter *iter)
return msc_iter_win_advance(iter);
/* block advance */
- if (++iter->block == iter->win->nr_segs)
- iter->block = 0;
+ if (sg_is_last(iter->block))
+ iter->block = msc_win_base_sg(iter->win);
+ else
+ iter->block = sg_next(iter->block);
/* no wrapping, sanity check in case there is no last written block */
if (!iter->wrap_count && iter->block == iter->start_block)
@@ -506,14 +650,15 @@ next_block:
static void msc_buffer_clear_hw_header(struct msc *msc)
{
struct msc_window *win;
+ struct scatterlist *sg;
list_for_each_entry(win, &msc->win_list, entry) {
unsigned int blk;
size_t hw_sz = sizeof(struct msc_block_desc) -
offsetof(struct msc_block_desc, hw_tag);
- for (blk = 0; blk < win->nr_segs; blk++) {
- struct msc_block_desc *bdesc = msc_win_block(win, blk);
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct msc_block_desc *bdesc = sg_virt(sg);
memset(&bdesc->hw_tag, 0, hw_sz);
}
@@ -527,6 +672,9 @@ static int intel_th_msu_init(struct msc *msc)
if (!msc->do_irq)
return 0;
+ if (!msc->mbuf)
+ return 0;
+
mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
mintctl |= msc->index ? M1BLIE : M0BLIE;
iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
@@ -554,6 +702,49 @@ static void intel_th_msu_deinit(struct msc *msc)
iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}
+static int msc_win_set_lockout(struct msc_window *win,
+ enum lockout_state expect,
+ enum lockout_state new)
+{
+ enum lockout_state old;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!win->msc->mbuf)
+ return 0;
+
+ spin_lock_irqsave(&win->lo_lock, flags);
+ old = win->lockout;
+
+ if (old != expect) {
+ ret = -EINVAL;
+ dev_warn_ratelimited(msc_dev(win->msc),
+ "expected lockout state %d, got %d\n",
+ expect, old);
+ goto unlock;
+ }
+
+ win->lockout = new;
+
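+ /* entering WIN_LOCKED takes a buffer user reference; leaving it drops one */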
+ if (old == expect && new == WIN_LOCKED)
+ atomic_inc(&win->msc->user_count);
+ else if (old == expect && old == WIN_LOCKED)
+ atomic_dec(&win->msc->user_count);
+
+unlock:
+ spin_unlock_irqrestore(&win->lo_lock, flags);
+
+ if (ret) {
+ if (expect == WIN_READY && old == WIN_LOCKED)
+ return -EBUSY;
+
+ /* from intel_th_msc_window_unlock(), don't warn if not locked */
+ if (expect == WIN_LOCKED && old == new)
+ return 0;
+ }
+
+ return ret;
+}
/**
* msc_configure() - set up MSC hardware
* @msc: the MSC device to configure
@@ -571,8 +762,15 @@ static int msc_configure(struct msc *msc)
if (msc->mode > MSC_MODE_MULTI)
return -ENOTSUPP;
- if (msc->mode == MSC_MODE_MULTI)
+ if (msc->mode == MSC_MODE_MULTI) {
+ if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
+ return -EBUSY;
+
msc_buffer_clear_hw_header(msc);
+ }
+
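+ /* save the original BAR and size so msc_disable() can restore them */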
+ msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
+ msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);
reg = msc->base_addr >> PAGE_SHIFT;
iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);
@@ -594,10 +792,14 @@ static int msc_configure(struct msc *msc)
iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
+ intel_th_msu_init(msc);
+
msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
intel_th_trace_enable(msc->thdev);
msc->enabled = 1;
+ if (msc->mbuf && msc->mbuf->activate)
+ msc->mbuf->activate(msc->mbuf_priv);
return 0;
}
@@ -611,10 +813,17 @@ static int msc_configure(struct msc *msc)
*/
static void msc_disable(struct msc *msc)
{
+ struct msc_window *win = msc->cur_win;
u32 reg;
lockdep_assert_held(&msc->buf_mutex);
+ if (msc->mode == MSC_MODE_MULTI)
+ msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
+
+ if (msc->mbuf && msc->mbuf->deactivate)
+ msc->mbuf->deactivate(msc->mbuf_priv);
+ intel_th_msu_deinit(msc);
intel_th_trace_disable(msc->thdev);
if (msc->mode == MSC_MODE_SINGLE) {
@@ -630,16 +839,25 @@ static void msc_disable(struct msc *msc)
reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
reg &= ~MSC_EN;
iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
+
+ if (msc->mbuf && msc->mbuf->ready)
+ msc->mbuf->ready(msc->mbuf_priv, win->sgt,
+ msc_win_total_sz(win));
+
msc->enabled = 0;
- iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR);
- iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE);
+ iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
+ iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);
dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
ioread32(msc->reg_base + REG_MSU_MSC0NWSA));
reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);
+
+ reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
+ reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
+ iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
}
static int intel_th_msc_activate(struct intel_th_device *thdev)
@@ -791,10 +1009,9 @@ static int __msc_buffer_win_alloc(struct msc_window *win,
return nr_segs;
err_nomem:
- for (i--; i >= 0; i--)
+ for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
- msc_win_block(win, i),
- msc_win_baddr(win, i));
+ sg_virt(sg_ptr), sg_dma_address(sg_ptr));
sg_free_table(win->sgt);
@@ -804,20 +1021,26 @@ err_nomem:
#ifdef CONFIG_X86
static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
{
+ struct scatterlist *sg_ptr;
int i;
- for (i = 0; i < nr_segs; i++)
+ for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
/* Set the page as uncached */
- set_memory_uc((unsigned long)msc_win_block(win, i), 1);
+ set_memory_uc((unsigned long)sg_virt(sg_ptr),
+ PFN_DOWN(sg_ptr->length));
+ }
}
static void msc_buffer_set_wb(struct msc_window *win)
{
+ struct scatterlist *sg_ptr;
int i;
- for (i = 0; i < win->nr_segs; i++)
+ for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
/* Reset the page to write-back */
- set_memory_wb((unsigned long)msc_win_block(win, i), 1);
+ set_memory_wb((unsigned long)sg_virt(sg_ptr),
+ PFN_DOWN(sg_ptr->length));
+ }
}
#else /* !X86 */
static inline void
@@ -843,19 +1066,14 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
if (!nr_blocks)
return 0;
- /*
- * This limitation hold as long as we need random access to the
- * block. When that changes, this can go away.
- */
- if (nr_blocks > SG_MAX_SINGLE_ALLOC)
- return -EINVAL;
-
win = kzalloc(sizeof(*win), GFP_KERNEL);
if (!win)
return -ENOMEM;
win->msc = msc;
win->sgt = &win->_sgt;
+ win->lockout = WIN_READY;
+ spin_lock_init(&win->lo_lock);
if (!list_empty(&msc->win_list)) {
struct msc_window *prev = list_last_entry(&msc->win_list,
@@ -865,8 +1083,13 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
win->pgoff = prev->pgoff + prev->nr_blocks;
}
- ret = __msc_buffer_win_alloc(win, nr_blocks);
- if (ret < 0)
+ if (msc->mbuf && msc->mbuf->alloc_window)
+ ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
+ nr_blocks << PAGE_SHIFT);
+ else
+ ret = __msc_buffer_win_alloc(win, nr_blocks);
+
+ if (ret <= 0)
goto err_nomem;
msc_buffer_set_uc(win, ret);
@@ -875,8 +1098,8 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
win->nr_blocks = nr_blocks;
if (list_empty(&msc->win_list)) {
- msc->base = msc_win_block(win, 0);
- msc->base_addr = msc_win_baddr(win, 0);
+ msc->base = msc_win_base(win);
+ msc->base_addr = msc_win_base_dma(win);
msc->cur_win = win;
}
@@ -893,14 +1116,15 @@ err_nomem:
static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
+ struct scatterlist *sg;
int i;
- for (i = 0; i < win->nr_segs; i++) {
- struct page *page = sg_page(&win->sgt->sgl[i]);
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
+ struct page *page = sg_page(sg);
page->mapping = NULL;
dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
- msc_win_block(win, i), msc_win_baddr(win, i));
+ sg_virt(sg), sg_dma_address(sg));
}
sg_free_table(win->sgt);
}
@@ -925,7 +1149,10 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
msc_buffer_set_wb(win);
- __msc_buffer_win_free(msc, win);
+ if (msc->mbuf && msc->mbuf->free_window)
+ msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
+ else
+ __msc_buffer_win_free(msc, win);
kfree(win);
}
@@ -943,6 +1170,7 @@ static void msc_buffer_relink(struct msc *msc)
/* call with msc::mutex locked */
list_for_each_entry(win, &msc->win_list, entry) {
+ struct scatterlist *sg;
unsigned int blk;
u32 sw_tag = 0;
@@ -958,12 +1186,12 @@ static void msc_buffer_relink(struct msc *msc)
next_win = list_next_entry(win, entry);
}
- for (blk = 0; blk < win->nr_segs; blk++) {
- struct msc_block_desc *bdesc = msc_win_block(win, blk);
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct msc_block_desc *bdesc = sg_virt(sg);
memset(bdesc, 0, sizeof(*bdesc));
- bdesc->next_win = msc_win_bpfn(next_win, 0);
+ bdesc->next_win = msc_win_base_pfn(next_win);
/*
* Similarly to last window, last block should point
@@ -971,13 +1199,15 @@ static void msc_buffer_relink(struct msc *msc)
*/
if (blk == win->nr_segs - 1) {
sw_tag |= MSC_SW_TAG_LASTBLK;
- bdesc->next_blk = msc_win_bpfn(win, 0);
+ bdesc->next_blk = msc_win_base_pfn(win);
} else {
- bdesc->next_blk = msc_win_bpfn(win, blk + 1);
+ dma_addr_t addr = sg_dma_address(sg_next(sg));
+
+ bdesc->next_blk = PFN_DOWN(addr);
}
bdesc->sw_tag = sw_tag;
- bdesc->block_sz = msc_win_actual_bsz(win, blk) / 64;
+ bdesc->block_sz = sg->length / 64;
}
}
@@ -1136,6 +1366,7 @@ static int msc_buffer_free_unless_used(struct msc *msc)
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
struct msc_window *win;
+ struct scatterlist *sg;
unsigned int blk;
if (msc->mode == MSC_MODE_SINGLE)
@@ -1150,9 +1381,9 @@ static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
found:
pgoff -= win->pgoff;
- for (blk = 0; blk < win->nr_segs; blk++) {
- struct page *page = sg_page(&win->sgt->sgl[blk]);
- size_t pgsz = PFN_DOWN(msc_win_actual_bsz(win, blk));
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct page *page = sg_page(sg);
+ size_t pgsz = PFN_DOWN(sg->length);
if (pgoff < pgsz)
return page + pgoff;
@@ -1456,24 +1687,83 @@ static void msc_win_switch(struct msc *msc)
else
msc->cur_win = list_next_entry(msc->cur_win, entry);
- msc->base = msc_win_block(msc->cur_win, 0);
- msc->base_addr = msc_win_baddr(msc->cur_win, 0);
+ msc->base = msc_win_base(msc->cur_win);
+ msc->base_addr = msc_win_base_dma(msc->cur_win);
intel_th_trace_switch(msc->thdev);
}
+/**
+ * intel_th_msc_window_unlock - put the window back in rotation
+ * @dev: MSC device to which this relates
+ * @sgt: buffer's sg_table for the window, does nothing if NULL
+ */
+void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
+{
+ struct msc *msc = dev_get_drvdata(dev);
+ struct msc_window *win;
+
+ if (!sgt)
+ return;
+
+ win = msc_find_window(msc, sgt, false);
+ if (!win)
+ return;
+
+ msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
+}
+EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);
+
+static void msc_work(struct work_struct *work)
+{
+ struct msc *msc = container_of(work, struct msc, work);
+
+ intel_th_msc_deactivate(msc->thdev);
+}
+
static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
struct msc *msc = dev_get_drvdata(&thdev->dev);
u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
+ struct msc_window *win, *next_win;
+
+ if (!msc->do_irq || !msc->mbuf)
+ return IRQ_NONE;
+
+ msusts &= mask;
+
+ if (!msusts)
+ return msc->enabled ? IRQ_HANDLED : IRQ_NONE;
+
+ iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
- if (!(msusts & mask)) {
- if (msc->enabled)
- return IRQ_HANDLED;
+ if (!msc->enabled)
return IRQ_NONE;
+
+ /* grab the window before we do the switch */
+ win = msc->cur_win;
+ if (!win)
+ return IRQ_HANDLED;
+ next_win = msc_next_window(win);
+ if (!next_win)
+ return IRQ_HANDLED;
+
+ /* next window: if READY, proceed, if LOCKED, stop the trace */
+ if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
+ schedule_work(&msc->work);
+ return IRQ_HANDLED;
}
+ /* current window: INUSE -> LOCKED */
+ msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
+
+ msc_win_switch(msc);
+
+ if (msc->mbuf && msc->mbuf->ready)
+ msc->mbuf->ready(msc->mbuf_priv, win->sgt,
+ msc_win_total_sz(win));
+
return IRQ_HANDLED;
}
@@ -1511,21 +1801,43 @@ wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
static DEVICE_ATTR_RW(wrap);
+static void msc_buffer_unassign(struct msc *msc)
+{
+ lockdep_assert_held(&msc->buf_mutex);
+
+ if (!msc->mbuf)
+ return;
+
+ msc->mbuf->unassign(msc->mbuf_priv);
+ msu_buffer_put(msc->mbuf);
+ msc->mbuf_priv = NULL;
+ msc->mbuf = NULL;
+}
+
static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct msc *msc = dev_get_drvdata(dev);
+ const char *mode = msc_mode[msc->mode];
+ ssize_t ret;
+
+ mutex_lock(&msc->buf_mutex);
+ if (msc->mbuf)
+ mode = msc->mbuf->name;
+ ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
+ mutex_unlock(&msc->buf_mutex);
- return scnprintf(buf, PAGE_SIZE, "%s\n", msc_mode[msc->mode]);
+ return ret;
}
static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
size_t size)
{
+ const struct msu_buffer *mbuf = NULL;
struct msc *msc = dev_get_drvdata(dev);
size_t len = size;
- char *cp;
+ char *cp, *mode;
int i, ret;
if (!capable(CAP_SYS_RAWIO))
@@ -1535,17 +1847,59 @@ mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
if (cp)
len = cp - buf;
- for (i = 0; i < ARRAY_SIZE(msc_mode); i++)
- if (!strncmp(msc_mode[i], buf, len))
- goto found;
+ mode = kstrndup(buf, len, GFP_KERNEL);
+ i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
+ if (i >= 0)
+ goto found;
+
+ /* Buffer sinks only work with a usable IRQ */
+ if (!msc->do_irq) {
+ kfree(mode);
+ return -EINVAL;
+ }
+
+ mbuf = msu_buffer_get(mode);
+ kfree(mode);
+ if (mbuf)
+ goto found;
return -EINVAL;
found:
mutex_lock(&msc->buf_mutex);
+ ret = 0;
+
+ /* Same buffer: do nothing */
+ if (mbuf && mbuf == msc->mbuf) {
+ /* put the extra reference we just got */
+ msu_buffer_put(mbuf);
+ goto unlock;
+ }
+
ret = msc_buffer_unlocked_free_unless_used(msc);
- if (!ret)
- msc->mode = i;
+ if (ret)
+ goto unlock;
+
+ if (mbuf) {
+ void *mbuf_priv = mbuf->assign(dev, &i);
+
+ if (!mbuf_priv) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ msc_buffer_unassign(msc);
+ msc->mbuf_priv = mbuf_priv;
+ msc->mbuf = mbuf;
+ } else {
+ msc_buffer_unassign(msc);
+ }
+
+ msc->mode = i;
+
+unlock:
+ if (ret && mbuf)
+ msu_buffer_put(mbuf);
mutex_unlock(&msc->buf_mutex);
return ret ? ret : size;
@@ -1667,7 +2021,12 @@ win_switch_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&msc->buf_mutex);
- if (msc->mode != MSC_MODE_MULTI)
+ /*
+ * Window switch can only happen in the "multi" mode.
+	 * If an external buffer is engaged, it has full
+	 * control over window switching.
+ */
+ if (msc->mode != MSC_MODE_MULTI || msc->mbuf)
ret = -ENOTSUPP;
else
msc_win_switch(msc);
@@ -1720,10 +2079,7 @@ static int intel_th_msc_probe(struct intel_th_device *thdev)
msc->reg_base = base + msc->index * 0x100;
msc->msu_base = base;
- err = intel_th_msu_init(msc);
- if (err)
- return err;
-
+ INIT_WORK(&msc->work, msc_work);
err = intel_th_msc_init(msc);
if (err)
return err;
@@ -1739,7 +2095,6 @@ static void intel_th_msc_remove(struct intel_th_device *thdev)
int ret;
intel_th_msc_deactivate(thdev);
- intel_th_msu_deinit(msc);
/*
* Buffers should not be used at this point except if the
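
A minimal standalone model of the window lockout protocol added above: each
window cycles READY -> INUSE (hardware writing) -> LOCKED (consumer reading)
-> READY, and a transition only succeeds from the expected state, as in
msc_win_set_lockout(). The struct and helpers below are illustrative
stand-ins, not the driver's own.

#include <stdio.h>

enum lockout_state { WIN_READY, WIN_INUSE, WIN_LOCKED };

struct window {
	enum lockout_state lockout;
	int users;			/* stands in for msc::user_count */
};

/* Transition only if the window is in the expected state. */
static int win_set_lockout(struct window *win,
			   enum lockout_state expect,
			   enum lockout_state new)
{
	if (win->lockout != expect)
		return -1;		/* caller maps this to -EBUSY etc. */

	win->lockout = new;
	if (new == WIN_LOCKED)
		win->users++;		/* a reader now holds the window */
	else if (expect == WIN_LOCKED)
		win->users--;		/* reader done, back in rotation */
	return 0;
}

int main(void)
{
	struct window w = { .lockout = WIN_READY };

	win_set_lockout(&w, WIN_READY, WIN_INUSE);	/* msc_configure() */
	win_set_lockout(&w, WIN_INUSE, WIN_LOCKED);	/* msc_disable()/IRQ */
	win_set_lockout(&w, WIN_LOCKED, WIN_READY);	/* window_unlock() */
	printf("state=%d users=%d\n", w.lockout, w.users);
	return 0;
}

Keeping the user count tied to the LOCKED state is what lets the driver
refuse to free buffers while an external consumer still holds a window.
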
diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h
index 13d9b141daaa..e771f509bd02 100644
--- a/drivers/hwtracing/intel_th/msu.h
+++ b/drivers/hwtracing/intel_th/msu.h
@@ -44,14 +44,6 @@ enum {
#define M0BLIE BIT(16)
#define M1BLIE BIT(24)
-/* MSC operating modes (MSC_MODE) */
-enum {
- MSC_MODE_SINGLE = 0,
- MSC_MODE_MULTI,
- MSC_MODE_EXI,
- MSC_MODE_DEBUG,
-};
-
/* MSCnSTS bits */
#define MSCSTS_WRAPSTAT BIT(1) /* Wrap occurred */
#define MSCSTS_PLE BIT(2) /* Pipeline Empty */
@@ -93,6 +85,16 @@ static inline unsigned long msc_data_sz(struct msc_block_desc *bdesc)
return bdesc->valid_dw * 4 - MSC_BDESC;
}
+static inline unsigned long msc_total_sz(struct msc_block_desc *bdesc)
+{
+ return bdesc->valid_dw * 4;
+}
+
+static inline unsigned long msc_block_sz(struct msc_block_desc *bdesc)
+{
+ return bdesc->block_sz * 64 - MSC_BDESC;
+}
+
static inline bool msc_block_wrapped(struct msc_block_desc *bdesc)
{
if (bdesc->hw_tag & (MSC_HW_TAG_BLOCKWRAP | MSC_HW_TAG_WINWRAP))
@@ -104,7 +106,7 @@ static inline bool msc_block_wrapped(struct msc_block_desc *bdesc)
static inline bool msc_block_last_written(struct msc_block_desc *bdesc)
{
if ((bdesc->hw_tag & MSC_HW_TAG_ENDBIT) ||
- (msc_data_sz(bdesc) != DATA_IN_PAGE))
+ (msc_data_sz(bdesc) != msc_block_sz(bdesc)))
return true;
return false;
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 871eb4bc4efc..7b971228df38 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -29,6 +29,7 @@ static struct dentry *icc_debugfs_dir;
* @req_node: entry in list of requests for the particular @node
* @node: the interconnect node to which this constraint applies
* @dev: reference to the device that sets the constraints
+ * @tag: path tag (optional)
* @avg_bw: an integer describing the average bandwidth in kBps
* @peak_bw: an integer describing the peak bandwidth in kBps
*/
@@ -36,6 +37,7 @@ struct icc_req {
struct hlist_node req_node;
struct icc_node *node;
struct device *dev;
+ u32 tag;
u32 avg_bw;
u32 peak_bw;
};
@@ -203,8 +205,11 @@ static int aggregate_requests(struct icc_node *node)
node->avg_bw = 0;
node->peak_bw = 0;
+ if (p->pre_aggregate)
+ p->pre_aggregate(node);
+
hlist_for_each_entry(r, &node->req_list, req_node)
- p->aggregate(node, r->avg_bw, r->peak_bw,
+ p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
&node->avg_bw, &node->peak_bw);
return 0;
@@ -386,6 +391,26 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
EXPORT_SYMBOL_GPL(of_icc_get);
/**
+ * icc_set_tag() - set an optional tag on a path
+ * @path: the path we want to tag
+ * @tag: the tag value
+ *
+ * This function allows consumers to append a tag to the requests associated
+ * with a path, so that a different aggregation could be done based on this tag.
+ */
+void icc_set_tag(struct icc_path *path, u32 tag)
+{
+ int i;
+
+ if (!path)
+ return;
+
+ for (i = 0; i < path->num_nodes; i++)
+ path->reqs[i].tag = tag;
+}
+EXPORT_SYMBOL_GPL(icc_set_tag);
+
+/**
* icc_set_bw() - set bandwidth constraints on an interconnect path
* @path: reference to the path returned by icc_get()
* @avg_bw: average bandwidth in kilobytes per second
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index d5e70ebc2410..6ab4012a059a 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -5,6 +5,15 @@ config INTERCONNECT_QCOM
help
Support for Qualcomm's Network-on-Chip interconnect hardware.
+config INTERCONNECT_QCOM_QCS404
+ tristate "Qualcomm QCS404 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on qcs404-based
+ platforms.
+
config INTERCONNECT_QCOM_SDM845
tristate "Qualcomm SDM845 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -12,3 +21,6 @@ config INTERCONNECT_QCOM_SDM845
help
This is a driver for the Qualcomm Network-on-Chip on sdm845-based
platforms.
+
+config INTERCONNECT_QCOM_SMD_RPM
+ tristate
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 1c1cea690f92..67dafb783dec 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -1,5 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
+qnoc-qcs404-objs := qcs404.o
qnoc-sdm845-objs := sdm845.o
+icc-smd-rpm-objs := smd-rpm.o
+obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
new file mode 100644
index 000000000000..910081d6ddc0
--- /dev/null
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Linaro Ltd
+ */
+
+#include <dt-bindings/interconnect/qcom,qcs404.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "smd-rpm.h"
+
+#define RPM_BUS_MASTER_REQ 0x73616d62
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+
+enum {
+ QCS404_MASTER_AMPSS_M0 = 1,
+ QCS404_MASTER_GRAPHICS_3D,
+ QCS404_MASTER_MDP_PORT0,
+ QCS404_SNOC_BIMC_1_MAS,
+ QCS404_MASTER_TCU_0,
+ QCS404_MASTER_SPDM,
+ QCS404_MASTER_BLSP_1,
+ QCS404_MASTER_BLSP_2,
+ QCS404_MASTER_XM_USB_HS1,
+ QCS404_MASTER_CRYPTO_CORE0,
+ QCS404_MASTER_SDCC_1,
+ QCS404_MASTER_SDCC_2,
+ QCS404_SNOC_PNOC_MAS,
+ QCS404_MASTER_QPIC,
+ QCS404_MASTER_QDSS_BAM,
+ QCS404_BIMC_SNOC_MAS,
+ QCS404_PNOC_SNOC_MAS,
+ QCS404_MASTER_QDSS_ETR,
+ QCS404_MASTER_EMAC,
+ QCS404_MASTER_PCIE,
+ QCS404_MASTER_USB3,
+ QCS404_PNOC_INT_0,
+ QCS404_PNOC_INT_2,
+ QCS404_PNOC_INT_3,
+ QCS404_PNOC_SLV_0,
+ QCS404_PNOC_SLV_1,
+ QCS404_PNOC_SLV_2,
+ QCS404_PNOC_SLV_3,
+ QCS404_PNOC_SLV_4,
+ QCS404_PNOC_SLV_6,
+ QCS404_PNOC_SLV_7,
+ QCS404_PNOC_SLV_8,
+ QCS404_PNOC_SLV_9,
+ QCS404_PNOC_SLV_10,
+ QCS404_PNOC_SLV_11,
+ QCS404_SNOC_QDSS_INT,
+ QCS404_SNOC_INT_0,
+ QCS404_SNOC_INT_1,
+ QCS404_SNOC_INT_2,
+ QCS404_SLAVE_EBI_CH0,
+ QCS404_BIMC_SNOC_SLV,
+ QCS404_SLAVE_SPDM_WRAPPER,
+ QCS404_SLAVE_PDM,
+ QCS404_SLAVE_PRNG,
+ QCS404_SLAVE_TCSR,
+ QCS404_SLAVE_SNOC_CFG,
+ QCS404_SLAVE_MESSAGE_RAM,
+ QCS404_SLAVE_DISPLAY_CFG,
+ QCS404_SLAVE_GRAPHICS_3D_CFG,
+ QCS404_SLAVE_BLSP_1,
+ QCS404_SLAVE_TLMM_NORTH,
+ QCS404_SLAVE_PCIE_1,
+ QCS404_SLAVE_EMAC_CFG,
+ QCS404_SLAVE_BLSP_2,
+ QCS404_SLAVE_TLMM_EAST,
+ QCS404_SLAVE_TCU,
+ QCS404_SLAVE_PMIC_ARB,
+ QCS404_SLAVE_SDCC_1,
+ QCS404_SLAVE_SDCC_2,
+ QCS404_SLAVE_TLMM_SOUTH,
+ QCS404_SLAVE_USB_HS,
+ QCS404_SLAVE_USB3,
+ QCS404_SLAVE_CRYPTO_0_CFG,
+ QCS404_PNOC_SNOC_SLV,
+ QCS404_SLAVE_APPSS,
+ QCS404_SLAVE_WCSS,
+ QCS404_SNOC_BIMC_1_SLV,
+ QCS404_SLAVE_OCIMEM,
+ QCS404_SNOC_PNOC_SLV,
+ QCS404_SLAVE_QDSS_STM,
+ QCS404_SLAVE_CATS_128,
+ QCS404_SLAVE_OCMEM_64,
+ QCS404_SLAVE_LPASS,
+};
+
+#define to_qcom_provider(_provider) \
+ container_of(_provider, struct qcom_icc_provider, provider)
+
+static const struct clk_bulk_data bus_clocks[] = {
+ { .id = "bus" },
+ { .id = "bus_a" },
+};
+
+/**
+ * struct qcom_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @bus_clks: the clk_bulk_data table of bus clocks
+ * @num_clks: the total number of clk_bulk_data entries
+ */
+struct qcom_icc_provider {
+ struct icc_provider provider;
+ struct clk_bulk_data *bus_clks;
+ int num_clks;
+};
+
+#define QCS404_MAX_LINKS 12
+
+/**
+ * struct qcom_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @mas_rpm_id: RPM id for devices that are bus masters
+ * @slv_rpm_id: RPM id for devices that are bus slaves
+ * @rate: current bus clock rate in Hz
+ */
+struct qcom_icc_node {
+ unsigned char *name;
+ u16 id;
+ u16 links[QCS404_MAX_LINKS];
+ u16 num_links;
+ u16 buswidth;
+ int mas_rpm_id;
+ int slv_rpm_id;
+ u64 rate;
+};
+
+struct qcom_icc_desc {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
+ ...) \
+ static struct qcom_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+ .mas_rpm_id = _mas_rpm_id, \
+ .slv_rpm_id = _slv_rpm_id, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QNODE(mas_apps_proc, QCS404_MASTER_AMPSS_M0, 8, 0, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, 6, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, 8, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+DEFINE_QNODE(mas_snoc_bimc_1, QCS404_SNOC_BIMC_1_MAS, 8, 76, -1, QCS404_SLAVE_EBI_CH0);
+DEFINE_QNODE(mas_tcu_0, QCS404_MASTER_TCU_0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+DEFINE_QNODE(mas_spdm, QCS404_MASTER_SPDM, 4, -1, -1, QCS404_PNOC_INT_3);
+DEFINE_QNODE(mas_blsp_1, QCS404_MASTER_BLSP_1, 4, 41, -1, QCS404_PNOC_INT_3);
+DEFINE_QNODE(mas_blsp_2, QCS404_MASTER_BLSP_2, 4, 39, -1, QCS404_PNOC_INT_3);
+DEFINE_QNODE(mas_xi_usb_hs1, QCS404_MASTER_XM_USB_HS1, 8, 138, -1, QCS404_PNOC_INT_0);
+DEFINE_QNODE(mas_crypto, QCS404_MASTER_CRYPTO_CORE0, 8, 23, -1, QCS404_PNOC_SNOC_SLV, QCS404_PNOC_INT_2);
+DEFINE_QNODE(mas_sdcc_1, QCS404_MASTER_SDCC_1, 8, 33, -1, QCS404_PNOC_INT_0);
+DEFINE_QNODE(mas_sdcc_2, QCS404_MASTER_SDCC_2, 8, 35, -1, QCS404_PNOC_INT_0);
+DEFINE_QNODE(mas_snoc_pcnoc, QCS404_SNOC_PNOC_MAS, 8, 77, -1, QCS404_PNOC_INT_2);
+DEFINE_QNODE(mas_qpic, QCS404_MASTER_QPIC, 4, -1, -1, QCS404_PNOC_INT_0);
+DEFINE_QNODE(mas_qdss_bam, QCS404_MASTER_QDSS_BAM, 4, -1, -1, QCS404_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_bimc_snoc, QCS404_BIMC_SNOC_MAS, 8, 21, -1, QCS404_SLAVE_OCMEM_64, QCS404_SLAVE_CATS_128, QCS404_SNOC_INT_0, QCS404_SNOC_INT_1);
+DEFINE_QNODE(mas_pcnoc_snoc, QCS404_PNOC_SNOC_MAS, 8, 29, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_2, QCS404_SNOC_INT_0);
+DEFINE_QNODE(mas_qdss_etr, QCS404_MASTER_QDSS_ETR, 8, -1, -1, QCS404_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_emac, QCS404_MASTER_EMAC, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
+DEFINE_QNODE(mas_pcie, QCS404_MASTER_PCIE, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
+DEFINE_QNODE(mas_usb3, QCS404_MASTER_USB3, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
+DEFINE_QNODE(pcnoc_int_0, QCS404_PNOC_INT_0, 8, 85, 114, QCS404_PNOC_SNOC_SLV, QCS404_PNOC_INT_2);
+DEFINE_QNODE(pcnoc_int_2, QCS404_PNOC_INT_2, 8, 124, 184, QCS404_PNOC_SLV_10, QCS404_SLAVE_TCU, QCS404_PNOC_SLV_11, QCS404_PNOC_SLV_2, QCS404_PNOC_SLV_3, QCS404_PNOC_SLV_0, QCS404_PNOC_SLV_1, QCS404_PNOC_SLV_6, QCS404_PNOC_SLV_7, QCS404_PNOC_SLV_4, QCS404_PNOC_SLV_8, QCS404_PNOC_SLV_9);
+DEFINE_QNODE(pcnoc_int_3, QCS404_PNOC_INT_3, 8, 125, 185, QCS404_PNOC_SNOC_SLV);
+DEFINE_QNODE(pcnoc_s_0, QCS404_PNOC_SLV_0, 4, 89, 118, QCS404_SLAVE_PRNG, QCS404_SLAVE_SPDM_WRAPPER, QCS404_SLAVE_PDM);
+DEFINE_QNODE(pcnoc_s_1, QCS404_PNOC_SLV_1, 4, 90, 119, QCS404_SLAVE_TCSR);
+DEFINE_QNODE(pcnoc_s_2, QCS404_PNOC_SLV_2, 4, -1, -1, QCS404_SLAVE_GRAPHICS_3D_CFG);
+DEFINE_QNODE(pcnoc_s_3, QCS404_PNOC_SLV_3, 4, 92, 121, QCS404_SLAVE_MESSAGE_RAM);
+DEFINE_QNODE(pcnoc_s_4, QCS404_PNOC_SLV_4, 4, 93, 122, QCS404_SLAVE_SNOC_CFG);
+DEFINE_QNODE(pcnoc_s_6, QCS404_PNOC_SLV_6, 4, 94, 123, QCS404_SLAVE_BLSP_1, QCS404_SLAVE_TLMM_NORTH, QCS404_SLAVE_EMAC_CFG);
+DEFINE_QNODE(pcnoc_s_7, QCS404_PNOC_SLV_7, 4, 95, 124, QCS404_SLAVE_TLMM_SOUTH, QCS404_SLAVE_DISPLAY_CFG, QCS404_SLAVE_SDCC_1, QCS404_SLAVE_PCIE_1, QCS404_SLAVE_SDCC_2);
+DEFINE_QNODE(pcnoc_s_8, QCS404_PNOC_SLV_8, 4, 96, 125, QCS404_SLAVE_CRYPTO_0_CFG);
+DEFINE_QNODE(pcnoc_s_9, QCS404_PNOC_SLV_9, 4, 97, 126, QCS404_SLAVE_BLSP_2, QCS404_SLAVE_TLMM_EAST, QCS404_SLAVE_PMIC_ARB);
+DEFINE_QNODE(pcnoc_s_10, QCS404_PNOC_SLV_10, 4, 157, -1, QCS404_SLAVE_USB_HS);
+DEFINE_QNODE(pcnoc_s_11, QCS404_PNOC_SLV_11, 4, 158, 246, QCS404_SLAVE_USB3);
+DEFINE_QNODE(qdss_int, QCS404_SNOC_QDSS_INT, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
+DEFINE_QNODE(snoc_int_0, QCS404_SNOC_INT_0, 8, 99, 130, QCS404_SLAVE_LPASS, QCS404_SLAVE_APPSS, QCS404_SLAVE_WCSS);
+DEFINE_QNODE(snoc_int_1, QCS404_SNOC_INT_1, 8, 100, 131, QCS404_SNOC_PNOC_SLV, QCS404_SNOC_INT_2);
+DEFINE_QNODE(snoc_int_2, QCS404_SNOC_INT_2, 8, 134, 197, QCS404_SLAVE_QDSS_STM, QCS404_SLAVE_OCIMEM);
+DEFINE_QNODE(slv_ebi, QCS404_SLAVE_EBI_CH0, 8, -1, 0, 0);
+DEFINE_QNODE(slv_bimc_snoc, QCS404_BIMC_SNOC_SLV, 8, -1, 2, QCS404_BIMC_SNOC_MAS);
+DEFINE_QNODE(slv_spdm, QCS404_SLAVE_SPDM_WRAPPER, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pdm, QCS404_SLAVE_PDM, 4, -1, 41, 0);
+DEFINE_QNODE(slv_prng, QCS404_SLAVE_PRNG, 4, -1, 44, 0);
+DEFINE_QNODE(slv_tcsr, QCS404_SLAVE_TCSR, 4, -1, 50, 0);
+DEFINE_QNODE(slv_snoc_cfg, QCS404_SLAVE_SNOC_CFG, 4, -1, 70, 0);
+DEFINE_QNODE(slv_message_ram, QCS404_SLAVE_MESSAGE_RAM, 4, -1, 55, 0);
+DEFINE_QNODE(slv_disp_ss_cfg, QCS404_SLAVE_DISPLAY_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_gpu_cfg, QCS404_SLAVE_GRAPHICS_3D_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_blsp_1, QCS404_SLAVE_BLSP_1, 4, -1, 39, 0);
+DEFINE_QNODE(slv_tlmm_north, QCS404_SLAVE_TLMM_NORTH, 4, -1, 214, 0);
+DEFINE_QNODE(slv_pcie, QCS404_SLAVE_PCIE_1, 4, -1, -1, 0);
+DEFINE_QNODE(slv_ethernet, QCS404_SLAVE_EMAC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_blsp_2, QCS404_SLAVE_BLSP_2, 4, -1, 37, 0);
+DEFINE_QNODE(slv_tlmm_east, QCS404_SLAVE_TLMM_EAST, 4, -1, 213, 0);
+DEFINE_QNODE(slv_tcu, QCS404_SLAVE_TCU, 8, -1, -1, 0);
+DEFINE_QNODE(slv_pmic_arb, QCS404_SLAVE_PMIC_ARB, 4, -1, 59, 0);
+DEFINE_QNODE(slv_sdcc_1, QCS404_SLAVE_SDCC_1, 4, -1, 31, 0);
+DEFINE_QNODE(slv_sdcc_2, QCS404_SLAVE_SDCC_2, 4, -1, 33, 0);
+DEFINE_QNODE(slv_tlmm_south, QCS404_SLAVE_TLMM_SOUTH, 4, -1, -1, 0);
+DEFINE_QNODE(slv_usb_hs, QCS404_SLAVE_USB_HS, 4, -1, 40, 0);
+DEFINE_QNODE(slv_usb3, QCS404_SLAVE_USB3, 4, -1, 22, 0);
+DEFINE_QNODE(slv_crypto_0_cfg, QCS404_SLAVE_CRYPTO_0_CFG, 4, -1, 52, 0);
+DEFINE_QNODE(slv_pcnoc_snoc, QCS404_PNOC_SNOC_SLV, 8, -1, 45, QCS404_PNOC_SNOC_MAS);
+DEFINE_QNODE(slv_kpss_ahb, QCS404_SLAVE_APPSS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_wcss, QCS404_SLAVE_WCSS, 4, -1, 23, 0);
+DEFINE_QNODE(slv_snoc_bimc_1, QCS404_SNOC_BIMC_1_SLV, 8, -1, 104, QCS404_SNOC_BIMC_1_MAS);
+DEFINE_QNODE(slv_imem, QCS404_SLAVE_OCIMEM, 8, -1, 26, 0);
+DEFINE_QNODE(slv_snoc_pcnoc, QCS404_SNOC_PNOC_SLV, 8, -1, 28, QCS404_SNOC_PNOC_MAS);
+DEFINE_QNODE(slv_qdss_stm, QCS404_SLAVE_QDSS_STM, 4, -1, 30, 0);
+DEFINE_QNODE(slv_cats_0, QCS404_SLAVE_CATS_128, 16, -1, -1, 0);
+DEFINE_QNODE(slv_cats_1, QCS404_SLAVE_OCMEM_64, 8, -1, -1, 0);
+DEFINE_QNODE(slv_lpass, QCS404_SLAVE_LPASS, 4, -1, -1, 0);
+
+static struct qcom_icc_node *qcs404_bimc_nodes[] = {
+ [MASTER_AMPSS_M0] = &mas_apps_proc,
+ [MASTER_OXILI] = &mas_oxili,
+ [MASTER_MDP_PORT0] = &mas_mdp,
+ [MASTER_SNOC_BIMC_1] = &mas_snoc_bimc_1,
+ [MASTER_TCU_0] = &mas_tcu_0,
+ [SLAVE_EBI_CH0] = &slv_ebi,
+ [SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
+};
+
+static struct qcom_icc_desc qcs404_bimc = {
+ .nodes = qcs404_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(qcs404_bimc_nodes),
+};
+
+static struct qcom_icc_node *qcs404_pcnoc_nodes[] = {
+ [MASTER_SPDM] = &mas_spdm,
+ [MASTER_BLSP_1] = &mas_blsp_1,
+ [MASTER_BLSP_2] = &mas_blsp_2,
+ [MASTER_XI_USB_HS1] = &mas_xi_usb_hs1,
+ [MASTER_CRYPT0] = &mas_crypto,
+ [MASTER_SDCC_1] = &mas_sdcc_1,
+ [MASTER_SDCC_2] = &mas_sdcc_2,
+ [MASTER_SNOC_PCNOC] = &mas_snoc_pcnoc,
+ [MASTER_QPIC] = &mas_qpic,
+ [PCNOC_INT_0] = &pcnoc_int_0,
+ [PCNOC_INT_2] = &pcnoc_int_2,
+ [PCNOC_INT_3] = &pcnoc_int_3,
+ [PCNOC_S_0] = &pcnoc_s_0,
+ [PCNOC_S_1] = &pcnoc_s_1,
+ [PCNOC_S_2] = &pcnoc_s_2,
+ [PCNOC_S_3] = &pcnoc_s_3,
+ [PCNOC_S_4] = &pcnoc_s_4,
+ [PCNOC_S_6] = &pcnoc_s_6,
+ [PCNOC_S_7] = &pcnoc_s_7,
+ [PCNOC_S_8] = &pcnoc_s_8,
+ [PCNOC_S_9] = &pcnoc_s_9,
+ [PCNOC_S_10] = &pcnoc_s_10,
+ [PCNOC_S_11] = &pcnoc_s_11,
+ [SLAVE_SPDM] = &slv_spdm,
+ [SLAVE_PDM] = &slv_pdm,
+ [SLAVE_PRNG] = &slv_prng,
+ [SLAVE_TCSR] = &slv_tcsr,
+ [SLAVE_SNOC_CFG] = &slv_snoc_cfg,
+ [SLAVE_MESSAGE_RAM] = &slv_message_ram,
+ [SLAVE_DISP_SS_CFG] = &slv_disp_ss_cfg,
+ [SLAVE_GPU_CFG] = &slv_gpu_cfg,
+ [SLAVE_BLSP_1] = &slv_blsp_1,
+ [SLAVE_BLSP_2] = &slv_blsp_2,
+ [SLAVE_TLMM_NORTH] = &slv_tlmm_north,
+ [SLAVE_PCIE] = &slv_pcie,
+ [SLAVE_ETHERNET] = &slv_ethernet,
+ [SLAVE_TLMM_EAST] = &slv_tlmm_east,
+ [SLAVE_TCU] = &slv_tcu,
+ [SLAVE_PMIC_ARB] = &slv_pmic_arb,
+ [SLAVE_SDCC_1] = &slv_sdcc_1,
+ [SLAVE_SDCC_2] = &slv_sdcc_2,
+ [SLAVE_TLMM_SOUTH] = &slv_tlmm_south,
+ [SLAVE_USB_HS] = &slv_usb_hs,
+ [SLAVE_USB3] = &slv_usb3,
+ [SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [SLAVE_PCNOC_SNOC] = &slv_pcnoc_snoc,
+};
+
+static struct qcom_icc_desc qcs404_pcnoc = {
+ .nodes = qcs404_pcnoc_nodes,
+ .num_nodes = ARRAY_SIZE(qcs404_pcnoc_nodes),
+};
+
+static struct qcom_icc_node *qcs404_snoc_nodes[] = {
+ [MASTER_QDSS_BAM] = &mas_qdss_bam,
+ [MASTER_BIMC_SNOC] = &mas_bimc_snoc,
+ [MASTER_PCNOC_SNOC] = &mas_pcnoc_snoc,
+ [MASTER_QDSS_ETR] = &mas_qdss_etr,
+ [MASTER_EMAC] = &mas_emac,
+ [MASTER_PCIE] = &mas_pcie,
+ [MASTER_USB3] = &mas_usb3,
+ [QDSS_INT] = &qdss_int,
+ [SNOC_INT_0] = &snoc_int_0,
+ [SNOC_INT_1] = &snoc_int_1,
+ [SNOC_INT_2] = &snoc_int_2,
+ [SLAVE_KPSS_AHB] = &slv_kpss_ahb,
+ [SLAVE_WCSS] = &slv_wcss,
+ [SLAVE_SNOC_BIMC_1] = &slv_snoc_bimc_1,
+ [SLAVE_IMEM] = &slv_imem,
+ [SLAVE_SNOC_PCNOC] = &slv_snoc_pcnoc,
+ [SLAVE_QDSS_STM] = &slv_qdss_stm,
+ [SLAVE_CATS_0] = &slv_cats_0,
+ [SLAVE_CATS_1] = &slv_cats_1,
+ [SLAVE_LPASS] = &slv_lpass,
+};
+
+static struct qcom_icc_desc qcs404_snoc = {
+ .nodes = qcs404_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(qcs404_snoc_nodes),
+};
+
+static int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
+static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct qcom_icc_provider *qp;
+ struct qcom_icc_node *qn;
+ struct icc_provider *provider;
+ struct icc_node *n;
+ u64 sum_bw;
+ u64 max_peak_bw;
+ u64 rate;
+ u32 agg_avg = 0;
+ u32 agg_peak = 0;
+ int ret, i;
+
+ qn = src->data;
+ provider = src->provider;
+ qp = to_qcom_provider(provider);
+
+ list_for_each_entry(n, &provider->nodes, node_list)
+ qcom_icc_aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
+
+ sum_bw = icc_units_to_bps(agg_avg);
+ max_peak_bw = icc_units_to_bps(agg_peak);
+
+ /* send bandwidth request message to the RPM processor */
+ if (qn->mas_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+ RPM_BUS_MASTER_REQ,
+ qn->mas_rpm_id,
+ sum_bw);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
+ qn->mas_rpm_id, ret);
+ return ret;
+ }
+ }
+
+ if (qn->slv_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+ RPM_BUS_SLAVE_REQ,
+ qn->slv_rpm_id,
+ sum_bw);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send slv error %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ rate = max(sum_bw, max_peak_bw);
+
+ do_div(rate, qn->buswidth);
+
+ if (qn->rate == rate)
+ return 0;
+
+ for (i = 0; i < qp->num_clks; i++) {
+ ret = clk_set_rate(qp->bus_clks[i].clk, rate);
+ if (ret) {
+ pr_err("%s clk_set_rate error: %d\n",
+ qp->bus_clks[i].id, ret);
+ return ret;
+ }
+ }
+
+ qn->rate = rate;
+
+ return 0;
+}
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ /* wait for the RPM proxy */
+ if (!qcom_icc_rpm_smd_available())
+ return -EPROBE_DEFER;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kcalloc(dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ qp->bus_clks = devm_kmemdup(dev, bus_clocks, sizeof(bus_clocks),
+ GFP_KERNEL);
+ if (!qp->bus_clks)
+ return -ENOMEM;
+
+ qp->num_clks = ARRAY_SIZE(bus_clocks);
+ ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ provider = &qp->provider;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->dev = dev;
+ provider->set = qcom_icc_set;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ dev_dbg(dev, "registered node %s\n", node->name);
+
+ /* populate links */
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ icc_provider_del(provider);
+
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+ struct icc_provider *provider = &qp->provider;
+ struct icc_node *n;
+
+ list_for_each_entry(n, &provider->nodes, node_list) {
+ icc_node_del(n);
+ icc_node_destroy(n->id);
+ }
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+
+ return icc_provider_del(provider);
+}
+
+static const struct of_device_id qcs404_noc_of_match[] = {
+ { .compatible = "qcom,qcs404-bimc", .data = &qcs404_bimc },
+ { .compatible = "qcom,qcs404-pcnoc", .data = &qcs404_pcnoc },
+ { .compatible = "qcom,qcs404-snoc", .data = &qcs404_snoc },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcs404_noc_of_match);
+
+static struct platform_driver qcs404_noc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-qcs404",
+ .of_match_table = qcs404_noc_of_match,
+ },
+};
+module_platform_driver(qcs404_noc_driver);
+MODULE_DESCRIPTION("Qualcomm QCS404 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index 4915b78da673..57955596bb59 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
*/
@@ -20,23 +20,6 @@
#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>
-#define BCM_TCS_CMD_COMMIT_SHFT 30
-#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
-#define BCM_TCS_CMD_VALID_SHFT 29
-#define BCM_TCS_CMD_VALID_MASK 0x20000000
-#define BCM_TCS_CMD_VOTE_X_SHFT 14
-#define BCM_TCS_CMD_VOTE_MASK 0x3fff
-#define BCM_TCS_CMD_VOTE_Y_SHFT 0
-#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000
-
-#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
- (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \
- ((valid) << BCM_TCS_CMD_VALID_SHFT) | \
- ((cpu_to_le32(vote_x) & \
- BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \
- ((cpu_to_le32(vote_y) & \
- BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
-
#define to_qcom_provider(_provider) \
container_of(_provider, struct qcom_icc_provider, provider)
@@ -66,6 +49,22 @@ struct bcm_db {
#define SDM845_MAX_BCM_PER_NODE 2
#define SDM845_MAX_VCD 10
+/*
+ * The AMC bucket denotes constraints that are applied to hardware when
+ * icc_set_bw() completes, whereas the WAKE and SLEEP constraints are applied
+ * when the execution environment transitions between active and low power mode.
+ */
+#define QCOM_ICC_BUCKET_AMC 0
+#define QCOM_ICC_BUCKET_WAKE 1
+#define QCOM_ICC_BUCKET_SLEEP 2
+#define QCOM_ICC_NUM_BUCKETS 3
+#define QCOM_ICC_TAG_AMC BIT(QCOM_ICC_BUCKET_AMC)
+#define QCOM_ICC_TAG_WAKE BIT(QCOM_ICC_BUCKET_WAKE)
+#define QCOM_ICC_TAG_SLEEP BIT(QCOM_ICC_BUCKET_SLEEP)
+#define QCOM_ICC_TAG_ACTIVE_ONLY (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE)
+#define QCOM_ICC_TAG_ALWAYS (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE |\
+ QCOM_ICC_TAG_SLEEP)
+
/**
* struct qcom_icc_node - Qualcomm specific interconnect nodes
* @name: the node name used in debugfs
@@ -86,8 +85,8 @@ struct qcom_icc_node {
u16 num_links;
u16 channels;
u16 buswidth;
- u64 sum_avg;
- u64 max_peak;
+ u64 sum_avg[QCOM_ICC_NUM_BUCKETS];
+ u64 max_peak[QCOM_ICC_NUM_BUCKETS];
struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE];
size_t num_bcms;
};
@@ -112,8 +111,8 @@ struct qcom_icc_bcm {
const char *name;
u32 type;
u32 addr;
- u64 vote_x;
- u64 vote_y;
+ u64 vote_x[QCOM_ICC_NUM_BUCKETS];
+ u64 vote_y[QCOM_ICC_NUM_BUCKETS];
bool dirty;
bool keepalive;
struct bcm_db aux_data;
@@ -555,7 +554,7 @@ inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
cmd->wait = true;
}
-static void tcs_list_gen(struct list_head *bcm_list,
+static void tcs_list_gen(struct list_head *bcm_list, int bucket,
struct tcs_cmd tcs_list[SDM845_MAX_VCD],
int n[SDM845_MAX_VCD])
{
@@ -573,8 +572,8 @@ static void tcs_list_gen(struct list_head *bcm_list,
commit = true;
cur_vcd_size = 0;
}
- tcs_cmd_gen(&tcs_list[idx], bcm->vote_x, bcm->vote_y,
- bcm->addr, commit);
+ tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
+ bcm->vote_y[bucket], bcm->addr, commit);
idx++;
n[batch]++;
/*
@@ -595,38 +594,56 @@ static void tcs_list_gen(struct list_head *bcm_list,
static void bcm_aggregate(struct qcom_icc_bcm *bcm)
{
- size_t i;
- u64 agg_avg = 0;
- u64 agg_peak = 0;
+ size_t i, bucket;
+ u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
+ u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
u64 temp;
- for (i = 0; i < bcm->num_nodes; i++) {
- temp = bcm->nodes[i]->sum_avg * bcm->aux_data.width;
- do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
- agg_avg = max(agg_avg, temp);
+ for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
+ for (i = 0; i < bcm->num_nodes; i++) {
+ temp = bcm->nodes[i]->sum_avg[bucket] * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
+ agg_avg[bucket] = max(agg_avg[bucket], temp);
- temp = bcm->nodes[i]->max_peak * bcm->aux_data.width;
- do_div(temp, bcm->nodes[i]->buswidth);
- agg_peak = max(agg_peak, temp);
- }
+ temp = bcm->nodes[i]->max_peak[bucket] * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth);
+ agg_peak[bucket] = max(agg_peak[bucket], temp);
+ }
- temp = agg_avg * 1000ULL;
- do_div(temp, bcm->aux_data.unit);
- bcm->vote_x = temp;
+ temp = agg_avg[bucket] * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_x[bucket] = temp;
- temp = agg_peak * 1000ULL;
- do_div(temp, bcm->aux_data.unit);
- bcm->vote_y = temp;
+ temp = agg_peak[bucket] * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_y[bucket] = temp;
+ }
- if (bcm->keepalive && bcm->vote_x == 0 && bcm->vote_y == 0) {
- bcm->vote_x = 1;
- bcm->vote_y = 1;
+ if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
+ bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
+ bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
+ bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
+ bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
+ bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
}
bcm->dirty = false;
}
-static int qcom_icc_aggregate(struct icc_node *node, u32 avg_bw,
+static void qcom_icc_pre_aggregate(struct icc_node *node)
+{
+ size_t i;
+ struct qcom_icc_node *qn;
+
+ qn = node->data;
+
+ for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ qn->sum_avg[i] = 0;
+ qn->max_peak[i] = 0;
+ }
+}
+
+static int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
size_t i;
@@ -634,12 +651,19 @@ static int qcom_icc_aggregate(struct icc_node *node, u32 avg_bw,
qn = node->data;
+ if (!tag)
+ tag = QCOM_ICC_TAG_ALWAYS;
+
+ for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ if (tag & BIT(i)) {
+ qn->sum_avg[i] += avg_bw;
+ qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
+ }
+ }
+
*agg_avg += avg_bw;
*agg_peak = max_t(u32, *agg_peak, peak_bw);
- qn->sum_avg = *agg_avg;
- qn->max_peak = *agg_peak;
-
for (i = 0; i < qn->num_bcms; i++)
qn->bcms[i]->dirty = true;
@@ -675,7 +699,7 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
* Construct the command list based on a pre ordered list of BCMs
* based on VCD.
*/
- tcs_list_gen(&commit_list, cmds, commit_idx);
+ tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
if (!commit_idx[0])
return ret;
@@ -693,6 +717,41 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
return ret;
}
+ INIT_LIST_HEAD(&commit_list);
+
+ for (i = 0; i < qp->num_bcms; i++) {
+ /*
+ * Only generate WAKE and SLEEP commands if a resource's
+ * requirements change as the execution environment transitions
+ * between different power states.
+ */
+ if (qp->bcms[i]->vote_x[QCOM_ICC_BUCKET_WAKE] !=
+ qp->bcms[i]->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
+ qp->bcms[i]->vote_y[QCOM_ICC_BUCKET_WAKE] !=
+ qp->bcms[i]->vote_y[QCOM_ICC_BUCKET_SLEEP]) {
+ list_add_tail(&qp->bcms[i]->list, &commit_list);
+ }
+ }
+
+ if (list_empty(&commit_list))
+ return ret;
+
+ tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);
+
+ ret = rpmh_write_batch(qp->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
+ if (ret) {
+ pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
+ return ret;
+ }
+
+ tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);
+
+ ret = rpmh_write_batch(qp->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
+ if (ret) {
+ pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
+ return ret;
+ }
+
return ret;
}
@@ -738,6 +797,7 @@ static int qnoc_probe(struct platform_device *pdev)
provider = &qp->provider;
provider->dev = &pdev->dev;
provider->set = qcom_icc_set;
+ provider->pre_aggregate = qcom_icc_pre_aggregate;
provider->aggregate = qcom_icc_aggregate;
provider->xlate = of_icc_xlate_onecell;
INIT_LIST_HEAD(&provider->nodes);
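
For a single node, the per-bucket vote math in bcm_aggregate() above reduces
to scaling the aggregated bandwidth by the BCM's width ratio and unit. A
standalone arithmetic sketch with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* One node voting on one BCM; all values are illustrative. */
	uint64_t sum_avg = 7200000;	/* kBps, summed average requests */
	uint64_t max_peak = 8500000;	/* kBps, max peak request */
	uint64_t width = 4, buswidth = 4, channels = 2, unit = 1000;

	uint64_t agg_avg = sum_avg * width / (buswidth * channels);
	uint64_t agg_peak = max_peak * width / buswidth;

	/* vote_x/vote_y for one bucket, scaled by 1000 over the unit. */
	printf("vote_x=%llu vote_y=%llu\n",
	       (unsigned long long)(agg_avg * 1000 / unit),
	       (unsigned long long)(agg_peak * 1000 / unit));
	return 0;
}
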
diff --git a/drivers/interconnect/qcom/smd-rpm.c b/drivers/interconnect/qcom/smd-rpm.c
new file mode 100644
index 000000000000..dc8ff8d133a9
--- /dev/null
+++ b/drivers/interconnect/qcom/smd-rpm.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RPM over SMD communication wrapper for interconnects
+ *
+ * Copyright (C) 2019 Linaro Ltd
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include "smd-rpm.h"
+
+#define RPM_KEY_BW 0x00007762
+
+static struct qcom_smd_rpm *icc_smd_rpm;
+
+struct icc_rpm_smd_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
+bool qcom_icc_rpm_smd_available(void)
+{
+ return !!icc_smd_rpm;
+}
+EXPORT_SYMBOL_GPL(qcom_icc_rpm_smd_available);
+
+int qcom_icc_rpm_smd_send(int ctx, int rsc_type, int id, u32 val)
+{
+ struct icc_rpm_smd_req req = {
+ .key = cpu_to_le32(RPM_KEY_BW),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(val),
+ };
+
+ return qcom_rpm_smd_write(icc_smd_rpm, ctx, rsc_type, id, &req,
+ sizeof(req));
+}
+EXPORT_SYMBOL_GPL(qcom_icc_rpm_smd_send);
+
+static int qcom_icc_rpm_smd_remove(struct platform_device *pdev)
+{
+ icc_smd_rpm = NULL;
+
+ return 0;
+}
+
+static int qcom_icc_rpm_smd_probe(struct platform_device *pdev)
+{
+ icc_smd_rpm = dev_get_drvdata(pdev->dev.parent);
+
+ if (!icc_smd_rpm) {
+ dev_err(&pdev->dev, "unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static struct platform_driver qcom_interconnect_rpm_smd_driver = {
+ .driver = {
+ .name = "icc_smd_rpm",
+ },
+ .probe = qcom_icc_rpm_smd_probe,
+ .remove = qcom_icc_rpm_smd_remove,
+};
+module_platform_driver(qcom_interconnect_rpm_smd_driver);
+MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm SMD RPM interconnect proxy driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:icc_smd_rpm");
diff --git a/drivers/interconnect/qcom/smd-rpm.h b/drivers/interconnect/qcom/smd-rpm.h
new file mode 100644
index 000000000000..ca9d0327b8ac
--- /dev/null
+++ b/drivers/interconnect/qcom/smd-rpm.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SMD_RPM_H
+#define __DRIVERS_INTERCONNECT_QCOM_SMD_RPM_H
+
+#include <linux/soc/qcom/smd-rpm.h>
+
+bool qcom_icc_rpm_smd_available(void);
+int qcom_icc_rpm_smd_send(int ctx, int rsc_type, int id, u32 val);
+
+#endif
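
A provider calls the two helpers declared here the way the qcs404 driver
above does. A compressed sketch follows; the resource type constant is
copied from qcs404.c, the rest is illustrative:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/soc/qcom/smd-rpm.h>

#include "smd-rpm.h"

#define RPM_BUS_MASTER_REQ	0x73616d62	/* bus master resource type */

static int example_vote(int mas_rpm_id, u32 bw_kbps)
{
	/* Defer until the RPM proxy device has probed. */
	if (!qcom_icc_rpm_smd_available())
		return -EPROBE_DEFER;

	return qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
				     RPM_BUS_MASTER_REQ, mas_rpm_id,
				     bw_kbps);
}
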
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 45b61b8d8442..c55b63750757 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -362,15 +362,6 @@ config DS1682
This driver can also be built as a module. If so, the module
will be called ds1682.
-config SPEAR13XX_PCIE_GADGET
- bool "PCIe gadget support for SPEAr13XX platform"
- depends on ARCH_SPEAR13XX && BROKEN
- help
- This option enables gadget support for PCIe controller. If
- board file defines any controller as PCIe endpoint then a sysfs
- entry will be created for that controller. User can use these
- sysfs node to configure PCIe EP as per his requirements.
-
config VMWARE_BALLOON
tristate "VMware Balloon Driver"
depends on VMWARE_VMCI && X86 && HYPERVISOR_GUEST
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 8dae0a976200..c1860d35dc7e 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
obj-y += cb710/
-obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
obj-$(CONFIG_PCH_PHUB) += pch_phub.o
obj-y += ti-st/
diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
index bcb10fa4bc3a..259fe1dfec03 100644
--- a/drivers/misc/cardreader/alcor_pci.c
+++ b/drivers/misc/cardreader/alcor_pci.c
@@ -334,8 +334,7 @@ static void alcor_pci_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int alcor_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct alcor_pci_priv *priv = pci_get_drvdata(pdev);
+ struct alcor_pci_priv *priv = dev_get_drvdata(dev);
alcor_pci_aspm_ctrl(priv, 1);
return 0;
@@ -344,8 +343,7 @@ static int alcor_suspend(struct device *dev)
static int alcor_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct alcor_pci_priv *priv = pci_get_drvdata(pdev);
+ struct alcor_pci_priv *priv = dev_get_drvdata(dev);
alcor_pci_aspm_ctrl(priv, 0);
return 0;
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index f2abe27010ef..0f791bfdc1f5 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -45,13 +45,16 @@ config EEPROM_AT25
will be called at25.
config EEPROM_LEGACY
- tristate "Old I2C EEPROM reader"
+ tristate "Old I2C EEPROM reader (DEPRECATED)"
depends on I2C && SYSFS
help
If you say yes here you get read-only access to the EEPROM data
available on modern memory DIMMs and Sony Vaio laptops via I2C. Such
EEPROMs could theoretically be available on other devices as well.
+	  This driver is deprecated and will be removed soon; please use the
+ better at24 driver instead.
+
This driver can also be built as a module. If so, the module
will be called eeprom.
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index 6f00c33cfe22..b081c67416d7 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -195,13 +195,13 @@ static int ee1004_probe(struct i2c_client *client,
mutex_lock(&ee1004_bus_lock);
if (++ee1004_dev_count == 1) {
for (cnr = 0; cnr < 2; cnr++) {
- ee1004_set_page[cnr] = i2c_new_dummy(client->adapter,
+ ee1004_set_page[cnr] = i2c_new_dummy_device(client->adapter,
EE1004_ADDR_SET_PAGE + cnr);
- if (!ee1004_set_page[cnr]) {
+ if (IS_ERR(ee1004_set_page[cnr])) {
dev_err(&client->dev,
"address 0x%02x unavailable\n",
EE1004_ADDR_SET_PAGE + cnr);
- err = -EADDRINUSE;
+ err = PTR_ERR(ee1004_set_page[cnr]);
goto err_clients;
}
}
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 4d0cb90f4aeb..9da81f6d4a1c 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -150,9 +150,9 @@ static int max6875_probe(struct i2c_client *client,
return -ENOMEM;
/* A fake client is created on the odd address */
- data->fake_client = i2c_new_dummy(client->adapter, client->addr + 1);
- if (!data->fake_client) {
- err = -ENOMEM;
+ data->fake_client = i2c_new_dummy_device(client->adapter, client->addr + 1);
+ if (IS_ERR(data->fake_client)) {
+ err = PTR_ERR(data->fake_client);
goto exit_kfree;
}
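
The ee1004 and max6875 hunks above follow the same conversion pattern:
i2c_new_dummy_device() returns an ERR_PTR instead of NULL, so callers can
propagate the real error code. A minimal sketch of the pattern, with the
helper around it invented for illustration:

#include <linux/err.h>
#include <linux/i2c.h>

static int example_attach_dummy(struct i2c_client *client)
{
	struct i2c_client *dummy;

	dummy = i2c_new_dummy_device(client->adapter, client->addr + 1);
	if (IS_ERR(dummy))
		return PTR_ERR(dummy);	/* was: if (!dummy) return -ENOMEM; */

	i2c_unregister_device(dummy);
	return 0;
}
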
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 98603e235cf0..47ae84afac2e 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -33,7 +33,6 @@
#define FASTRPC_INIT_HANDLE 1
#define FASTRPC_CTXID_MASK (0xFF0)
#define INIT_FILELEN_MAX (64 * 1024 * 1024)
-#define INIT_MEMLEN_MAX (8 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME "fastrpc"
/* Retrieves the number of input buffers from the scalars parameter */
@@ -186,6 +185,7 @@ struct fastrpc_channel_ctx {
struct idr ctx_idr;
struct list_head users;
struct miscdevice miscdev;
+ struct kref refcount;
};
struct fastrpc_user {
@@ -279,8 +279,11 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
GFP_KERNEL);
- if (!buf->virt)
+ if (!buf->virt) {
+ mutex_destroy(&buf->lock);
+ kfree(buf);
return -ENOMEM;
+ }
if (fl->sctx && fl->sctx->sid)
buf->phys += ((u64)fl->sctx->sid << 32);
@@ -290,6 +293,25 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
return 0;
}
+static void fastrpc_channel_ctx_free(struct kref *ref)
+{
+ struct fastrpc_channel_ctx *cctx;
+
+ cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);
+
+ kfree(cctx);
+}
+
+static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
+{
+ kref_get(&cctx->refcount);
+}
+
+static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
+{
+ kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
+}
+
static void fastrpc_context_free(struct kref *ref)
{
struct fastrpc_invoke_ctx *ctx;
@@ -313,6 +335,8 @@ static void fastrpc_context_free(struct kref *ref)
kfree(ctx->maps);
kfree(ctx->olaps);
kfree(ctx);
+
+ fastrpc_channel_ctx_put(cctx);
}
static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
@@ -419,6 +443,9 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
fastrpc_get_buff_overlaps(ctx);
}
+ /* Released in fastrpc_context_put() */
+ fastrpc_channel_ctx_get(cctx);
+
ctx->sc = sc;
ctx->retval = -1;
ctx->pid = current->pid;
@@ -448,6 +475,7 @@ err_idr:
spin_lock(&user->lock);
list_del(&ctx->node);
spin_unlock(&user->lock);
+ fastrpc_channel_ctx_put(cctx);
kfree(ctx->maps);
kfree(ctx->olaps);
kfree(ctx);
@@ -522,6 +550,7 @@ static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
mutex_lock(&buffer->lock);
list_del(&a->node);
mutex_unlock(&buffer->lock);
+ sg_free_table(&a->sgt);
kfree(a);
}
@@ -884,6 +913,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
if (!fl->sctx)
return -EINVAL;
+ if (!fl->cctx->rpdev)
+ return -EPIPE;
+
ctx = fastrpc_context_alloc(fl, kernel, sc, args);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1120,6 +1152,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
}
fastrpc_session_free(cctx, fl->sctx);
+ fastrpc_channel_ctx_put(cctx);
mutex_destroy(&fl->mutex);
kfree(fl);
@@ -1138,6 +1171,9 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
if (!fl)
return -ENOMEM;
+ /* Released in fastrpc_device_release() */
+ fastrpc_channel_ctx_get(cctx);
+
filp->private_data = fl;
spin_lock_init(&fl->lock);
mutex_init(&fl->mutex);
@@ -1163,26 +1199,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
return 0;
}
-static int fastrpc_dmabuf_free(struct fastrpc_user *fl, char __user *argp)
-{
- struct dma_buf *buf;
- int info;
-
- if (copy_from_user(&info, argp, sizeof(info)))
- return -EFAULT;
-
- buf = dma_buf_get(info);
- if (IS_ERR_OR_NULL(buf))
- return -EINVAL;
- /*
- * one for the last get and other for the ALLOC_DMA_BUFF ioctl
- */
- dma_buf_put(buf);
- dma_buf_put(buf);
-
- return 0;
-}
-
static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
struct fastrpc_alloc_dma_buf bp;
@@ -1218,8 +1234,6 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
return -EFAULT;
}
- get_dma_buf(buf->dmabuf);
-
return 0;
}
@@ -1287,9 +1301,6 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
case FASTRPC_IOCTL_INIT_CREATE:
err = fastrpc_init_create_process(fl, argp);
break;
- case FASTRPC_IOCTL_FREE_DMA_BUFF:
- err = fastrpc_dmabuf_free(fl, argp);
- break;
case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
err = fastrpc_dmabuf_alloc(fl, argp);
break;
@@ -1395,10 +1406,6 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
int i, err, domain_id = -1;
const char *domain;
- data = devm_kzalloc(rdev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
err = of_property_read_string(rdev->of_node, "label", &domain);
if (err) {
dev_info(rdev, "FastRPC Domain not specified in DT\n");
@@ -1417,6 +1424,10 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
return -EINVAL;
}
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
data->miscdev.minor = MISC_DYNAMIC_MINOR;
data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
domains[domain_id]);
@@ -1425,6 +1436,8 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
if (err)
return err;
+ kref_init(&data->refcount);
+
dev_set_drvdata(&rpdev->dev, data);
dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
INIT_LIST_HEAD(&data->users);
@@ -1459,7 +1472,9 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
misc_deregister(&cctx->miscdev);
of_platform_depopulate(&rpdev->dev);
- kfree(cctx);
+
+ cctx->rpdev = NULL;
+ fastrpc_channel_ctx_put(cctx);
}
static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
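
The fastrpc changes above pin the channel context with one reference per
open file and per in-flight invoke context, on top of the rpmsg device's
own reference dropped in remove(). A userspace sketch of the pairing, with
kref emulated by a plain counter:

#include <stdio.h>

struct channel { int refcount; };

static void channel_get(struct channel *c) { c->refcount++; }

static void channel_put(struct channel *c)
{
	if (--c->refcount == 0)
		printf("last reference dropped, channel freed\n");
}

int main(void)
{
	struct channel c = { .refcount = 1 };	/* rpmsg probe's reference */

	channel_get(&c);	/* fastrpc_device_open() */
	channel_get(&c);	/* fastrpc_context_alloc() */
	channel_put(&c);	/* fastrpc_context_free() */
	channel_put(&c);	/* fastrpc_device_release() */
	channel_put(&c);	/* fastrpc_rpmsg_remove() */
	return 0;
}

This is why in-flight invocations and open file descriptors can outlive the
rpmsg device: the context is only freed once the last holder puts it.
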
diff --git a/drivers/misc/habanalabs/asid.c b/drivers/misc/habanalabs/asid.c
index 2c01461701a3..a2fdf31cf27c 100644
--- a/drivers/misc/habanalabs/asid.c
+++ b/drivers/misc/habanalabs/asid.c
@@ -18,7 +18,7 @@ int hl_asid_init(struct hl_device *hdev)
mutex_init(&hdev->asid_mutex);
- /* ASID 0 is reserved for KMD and device CPU */
+ /* ASID 0 is reserved for the kernel driver and device CPU */
set_bit(0, hdev->asid_bitmap);
return 0;
diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/command_buffer.c
index e495f44064fa..53fddbd8e693 100644
--- a/drivers/misc/habanalabs/command_buffer.c
+++ b/drivers/misc/habanalabs/command_buffer.c
@@ -397,7 +397,8 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size)
rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
HL_KERNEL_ASID_ID);
if (rc) {
- dev_err(hdev->dev, "Failed to allocate CB for KMD %d\n", rc);
+ dev_err(hdev->dev,
+ "Failed to allocate CB for the kernel driver %d\n", rc);
return NULL;
}
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index f00d1c32f6d6..a9ac045dcfde 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -178,11 +178,23 @@ static void cs_do_release(struct kref *ref)
/* We also need to update CI for internal queues */
if (cs->submitted) {
- int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
+ hdev->asic_funcs->hw_queues_lock(hdev);
- WARN_ONCE((cs_cnt < 0),
- "hl%d: error in CS active cnt %d\n",
- hdev->id, cs_cnt);
+ hdev->cs_active_cnt--;
+ if (!hdev->cs_active_cnt) {
+ struct hl_device_idle_busy_ts *ts;
+
+ ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
+ ts->busy_to_idle_ts = ktime_get();
+
+ if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
+ hdev->idle_busy_ts_idx = 0;
+ } else if (hdev->cs_active_cnt < 0) {
+ dev_crit(hdev->dev, "CS active cnt %d is negative\n",
+ hdev->cs_active_cnt);
+ }
+
+ hdev->asic_funcs->hw_queues_unlock(hdev);
hl_int_hw_queue_update_ci(cs);
@@ -305,6 +317,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
other = ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)];
if ((other) && (!dma_fence_is_signaled(other))) {
spin_unlock(&ctx->cs_lock);
+ dev_dbg(hdev->dev,
+ "Rejecting CS because of too many in-flights CS\n");
rc = -EAGAIN;
goto free_fence;
}
@@ -395,8 +409,9 @@ static struct hl_cb *validate_queue_index(struct hl_device *hdev,
return NULL;
}
- if (hw_queue_prop->kmd_only) {
- dev_err(hdev->dev, "Queue index %d is restricted for KMD\n",
+ if (hw_queue_prop->driver_only) {
+ dev_err(hdev->dev,
+ "Queue index %d is restricted for the kernel driver\n",
chunk->queue_index);
return NULL;
} else if (hw_queue_prop->type == QUEUE_TYPE_INT) {
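
The cs_do_release() hunk above records a busy-to-idle timestamp in a
fixed-size ring whenever the active-CS count drops to zero. A standalone
sketch of just that ring, assuming an array size analogous to
HL_IDLE_BUSY_TS_ARR_SIZE and with ktime replaced by time():

#include <stdio.h>
#include <time.h>

#define TS_ARR_SIZE 4	/* stand-in for HL_IDLE_BUSY_TS_ARR_SIZE */

struct idle_busy_ts { time_t idle_to_busy, busy_to_idle; };

static struct idle_busy_ts ts_arr[TS_ARR_SIZE];
static int ts_idx;
static int cs_active_cnt;

static void cs_done(void)
{
	if (--cs_active_cnt == 0) {
		/* Record the busy -> idle edge, then wrap the write index. */
		ts_arr[ts_idx++].busy_to_idle = time(NULL);
		if (ts_idx == TS_ARR_SIZE)
			ts_idx = 0;
	}
}

int main(void)
{
	cs_active_cnt = 1;
	cs_done();	/* last in-flight CS completes, device goes idle */
	printf("recorded slot %d\n", (ts_idx + TS_ARR_SIZE - 1) % TS_ARR_SIZE);
	return 0;
}
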
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
index 8682590e3f6e..17db7b3dfb4c 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/context.c
@@ -26,12 +26,13 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
dma_fence_put(ctx->cs_pending[i]);
if (ctx->asid != HL_KERNEL_ASID_ID) {
- /*
- * The engines are stopped as there is no executing CS, but the
+ /* The engines are stopped as there is no executing CS, but the
* Coresight might be still working by accessing addresses
* related to the stopped engines. Hence stop it explicitly.
+ * Stop only if this is the compute context, as there can be
+ * only one compute context
*/
- if (hdev->in_debug)
+ if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
hl_device_set_debug_mode(hdev, false);
hl_vm_ctx_fini(ctx);
@@ -67,29 +68,36 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
goto out_err;
}
+ mutex_lock(&mgr->ctx_lock);
+ rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
+ mutex_unlock(&mgr->ctx_lock);
+
+ if (rc < 0) {
+ dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
+ goto free_ctx;
+ }
+
+ ctx->handle = rc;
+
rc = hl_ctx_init(hdev, ctx, false);
if (rc)
- goto free_ctx;
+ goto remove_from_idr;
hl_hpriv_get(hpriv);
ctx->hpriv = hpriv;
- /* TODO: remove for multiple contexts */
+ /* TODO: remove for multiple contexts per process */
hpriv->ctx = ctx;
- hdev->user_ctx = ctx;
- mutex_lock(&mgr->ctx_lock);
- rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
- mutex_unlock(&mgr->ctx_lock);
-
- if (rc < 0) {
- dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
- hl_ctx_free(hdev, ctx);
- goto out_err;
- }
+ /* TODO: remove the following line for multiple process support */
+ hdev->compute_ctx = ctx;
return 0;
+remove_from_idr:
+ mutex_lock(&mgr->ctx_lock);
+ idr_remove(&mgr->ctx_handles, ctx->handle);
+ mutex_unlock(&mgr->ctx_lock);
free_ctx:
kfree(ctx);
out_err:
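With the IDR slot now reserved before hl_ctx_init(), the handle is valid for lookups as soon as the context is published, and a late init failure only needs idr_remove(). A sketch of the matching lookup, using a hypothetical helper name:

	/* Hypothetical lookup helper: resolve a user handle back to its
	 * context under the same ctx_lock that guards allocation. */
	static struct hl_ctx *hl_ctx_get_by_handle(struct hl_ctx_mgr *mgr,
						   u32 handle)
	{
		struct hl_ctx *ctx;

		mutex_lock(&mgr->ctx_lock);
		ctx = idr_find(&mgr->ctx_handles, handle);
		mutex_unlock(&mgr->ctx_lock);

		return ctx;
	}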
@@ -120,7 +128,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
ctx->thread_ctx_switch_wait_token = 0;
if (is_kernel_ctx) {
- ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
+ ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
rc = hl_mmu_ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "Failed to init mmu ctx module\n");
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 18e499c900c7..87f37ac31ccd 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -29,7 +29,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_I2C_RD <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_I2C_RD <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.i2c_bus = i2c_bus;
pkt.i2c_addr = i2c_addr;
@@ -55,12 +55,12 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_I2C_WR <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_I2C_WR <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.i2c_bus = i2c_bus;
pkt.i2c_addr = i2c_addr;
pkt.i2c_reg = i2c_reg;
- pkt.value = __cpu_to_le64(val);
+ pkt.value = cpu_to_le64(val);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, NULL);
@@ -81,10 +81,10 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_LED_SET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_LED_SET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
- pkt.led_index = __cpu_to_le32(led);
- pkt.value = __cpu_to_le64(state);
+ pkt.led_index = cpu_to_le32(led);
+ pkt.value = cpu_to_le64(state);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, NULL);
@@ -370,7 +370,7 @@ static int mmu_show(struct seq_file *s, void *data)
if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
ctx = hdev->kernel_ctx;
else
- ctx = hdev->user_ctx;
+ ctx = hdev->compute_ctx;
if (!ctx) {
dev_err(hdev->dev, "no ctx available\n");
@@ -533,7 +533,7 @@ out:
static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
u64 *phys_addr)
{
- struct hl_ctx *ctx = hdev->user_ctx;
+ struct hl_ctx *ctx = hdev->compute_ctx;
u64 hop_addr, hop_pte_addr, hop_pte;
u64 offset_mask = HOP4_MASK | OFFSET_MASK;
int rc = 0;
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 7a8f9d0b71b5..459fee70a597 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -42,10 +42,12 @@ static void hpriv_release(struct kref *ref)
{
struct hl_fpriv *hpriv;
struct hl_device *hdev;
+ struct hl_ctx *ctx;
hpriv = container_of(ref, struct hl_fpriv, refcount);
hdev = hpriv->hdev;
+ ctx = hpriv->ctx;
put_pid(hpriv->taskpid);
@@ -53,13 +55,12 @@ static void hpriv_release(struct kref *ref)
mutex_destroy(&hpriv->restore_phase_mutex);
- kfree(hpriv);
-
- /* Now the FD is really closed */
- atomic_dec(&hdev->fd_open_cnt);
+ mutex_lock(&hdev->fpriv_list_lock);
+ list_del(&hpriv->dev_node);
+ hdev->compute_ctx = NULL;
+ mutex_unlock(&hdev->fpriv_list_lock);
- /* This allows a new user context to open the device */
- hdev->user_ctx = NULL;
+ kfree(hpriv);
}
void hl_hpriv_get(struct hl_fpriv *hpriv)
@@ -94,6 +95,24 @@ static int hl_device_release(struct inode *inode, struct file *filp)
return 0;
}
+static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
+{
+ struct hl_fpriv *hpriv = filp->private_data;
+ struct hl_device *hdev;
+
+ filp->private_data = NULL;
+
+ hdev = hpriv->hdev;
+
+ mutex_lock(&hdev->fpriv_list_lock);
+ list_del(&hpriv->dev_node);
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ kfree(hpriv);
+
+ return 0;
+}
+
/*
* hl_mmap - mmap function for habanalabs device
*
@@ -124,55 +143,102 @@ static const struct file_operations hl_ops = {
.compat_ioctl = hl_ioctl
};
+static const struct file_operations hl_ctrl_ops = {
+ .owner = THIS_MODULE,
+ .open = hl_device_open_ctrl,
+ .release = hl_device_release_ctrl,
+ .unlocked_ioctl = hl_ioctl_control,
+ .compat_ioctl = hl_ioctl_control
+};
+
+static void device_release_func(struct device *dev)
+{
+ kfree(dev);
+}
+
/*
- * device_setup_cdev - setup cdev and device for habanalabs device
+ * device_init_cdev - Initialize cdev and device for habanalabs device
*
* @hdev: pointer to habanalabs device structure
* @hclass: pointer to the class object of the device
* @minor: minor number of the specific device
- * @fpos : file operations to install for this device
+ * @fpos: file operations to install for this device
+ * @name: name of the device as it will appear in the filesystem
+ * @cdev: pointer to the char device object that will be initialized
+ * @dev: pointer to the device object that will be initialized
*
- * Create a cdev and a Linux device for habanalabs's device. Need to be
- * called at the end of the habanalabs device initialization process,
- * because this function exposes the device to the user
+ * Initialize a cdev and a Linux device for habanalabs's device.
*/
-static int device_setup_cdev(struct hl_device *hdev, struct class *hclass,
- int minor, const struct file_operations *fops)
+static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
+ int minor, const struct file_operations *fops,
+ char *name, struct cdev *cdev,
+ struct device **dev)
{
- int err, devno = MKDEV(hdev->major, minor);
- struct cdev *hdev_cdev = &hdev->cdev;
- char *name;
+ cdev_init(cdev, fops);
+ cdev->owner = THIS_MODULE;
- name = kasprintf(GFP_KERNEL, "hl%d", hdev->id);
- if (!name)
+ *dev = kzalloc(sizeof(**dev), GFP_KERNEL);
+ if (!*dev)
return -ENOMEM;
- cdev_init(hdev_cdev, fops);
- hdev_cdev->owner = THIS_MODULE;
- err = cdev_add(hdev_cdev, devno, 1);
- if (err) {
- pr_err("Failed to add char device %s\n", name);
- goto err_cdev_add;
+ device_initialize(*dev);
+ (*dev)->devt = MKDEV(hdev->major, minor);
+ (*dev)->class = hclass;
+ (*dev)->release = device_release_func;
+ dev_set_drvdata(*dev, hdev);
+ dev_set_name(*dev, "%s", name);
+
+ return 0;
+}
+
+static int device_cdev_sysfs_add(struct hl_device *hdev)
+{
+ int rc;
+
+ rc = cdev_device_add(&hdev->cdev, hdev->dev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to add a char device to the system\n");
+ return rc;
}
- hdev->dev = device_create(hclass, NULL, devno, NULL, "%s", name);
- if (IS_ERR(hdev->dev)) {
- pr_err("Failed to create device %s\n", name);
- err = PTR_ERR(hdev->dev);
- goto err_device_create;
+ rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to add a control char device to the system\n");
+ goto delete_cdev_device;
}
- dev_set_drvdata(hdev->dev, hdev);
+ /* hl_sysfs_init() must be done after adding the device to the system */
+ rc = hl_sysfs_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize sysfs\n");
+ goto delete_ctrl_cdev_device;
+ }
- kfree(name);
+ hdev->cdev_sysfs_created = true;
return 0;
-err_device_create:
- cdev_del(hdev_cdev);
-err_cdev_add:
- kfree(name);
- return err;
+delete_ctrl_cdev_device:
+ cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
+delete_cdev_device:
+ cdev_device_del(&hdev->cdev, hdev->dev);
+ return rc;
+}
+
+static void device_cdev_sysfs_del(struct hl_device *hdev)
+{
+ /* device_release() won't be called, so we must free the devices explicitly */
+ if (!hdev->cdev_sysfs_created) {
+ kfree(hdev->dev_ctrl);
+ kfree(hdev->dev);
+ return;
+ }
+
+ hl_sysfs_fini(hdev);
+ cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
+ cdev_device_del(&hdev->cdev, hdev->dev);
}
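The split between device_init_cdev() and device_cdev_sysfs_add() follows the kernel's cdev_device_add() pattern: initialize early, expose late, and let the device release callback do the freeing. A generic sketch of that pairing (not driver-specific):

	/* Prepare both objects without exposing anything to user-space */
	cdev_init(&cdev, &fops);
	cdev.owner = THIS_MODULE;

	device_initialize(dev);
	dev->devt = MKDEV(major, minor);
	dev->release = release_fn;	/* frees dev's container */

	/* The device node becomes visible only here, atomically */
	rc = cdev_device_add(&cdev, dev);
	if (rc)
		put_device(dev);	/* release_fn runs, nothing leaks */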
/*
@@ -227,20 +293,29 @@ static int device_early_init(struct hl_device *hdev)
goto free_eq_wq;
}
+ hdev->idle_busy_ts_arr = kmalloc_array(HL_IDLE_BUSY_TS_ARR_SIZE,
+ sizeof(struct hl_device_idle_busy_ts),
+ (GFP_KERNEL | __GFP_ZERO));
+ if (!hdev->idle_busy_ts_arr) {
+ rc = -ENOMEM;
+ goto free_chip_info;
+ }
+
hl_cb_mgr_init(&hdev->kernel_cb_mgr);
- mutex_init(&hdev->fd_open_cnt_lock);
mutex_init(&hdev->send_cpu_message_lock);
mutex_init(&hdev->debug_lock);
mutex_init(&hdev->mmu_cache_lock);
INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
spin_lock_init(&hdev->hw_queues_mirror_lock);
+ INIT_LIST_HEAD(&hdev->fpriv_list);
+ mutex_init(&hdev->fpriv_list_lock);
atomic_set(&hdev->in_reset, 0);
- atomic_set(&hdev->fd_open_cnt, 0);
- atomic_set(&hdev->cs_active_cnt, 0);
return 0;
+free_chip_info:
+ kfree(hdev->hl_chip_info);
free_eq_wq:
destroy_workqueue(hdev->eq_wq);
free_cq_wq:
@@ -266,8 +341,11 @@ static void device_early_fini(struct hl_device *hdev)
mutex_destroy(&hdev->debug_lock);
mutex_destroy(&hdev->send_cpu_message_lock);
+ mutex_destroy(&hdev->fpriv_list_lock);
+
hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
+ kfree(hdev->idle_busy_ts_arr);
kfree(hdev->hl_chip_info);
destroy_workqueue(hdev->eq_wq);
@@ -277,8 +355,6 @@ static void device_early_fini(struct hl_device *hdev)
if (hdev->asic_funcs->early_fini)
hdev->asic_funcs->early_fini(hdev);
-
- mutex_destroy(&hdev->fd_open_cnt_lock);
}
static void set_freq_to_low_job(struct work_struct *work)
@@ -286,9 +362,13 @@ static void set_freq_to_low_job(struct work_struct *work)
struct hl_device *hdev = container_of(work, struct hl_device,
work_freq.work);
- if (atomic_read(&hdev->fd_open_cnt) == 0)
+ mutex_lock(&hdev->fpriv_list_lock);
+
+ if (!hdev->compute_ctx)
hl_device_set_frequency(hdev, PLL_LOW);
+ mutex_unlock(&hdev->fpriv_list_lock);
+
schedule_delayed_work(&hdev->work_freq,
usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
}
@@ -338,7 +418,7 @@ static int device_late_init(struct hl_device *hdev)
hdev->high_pll = hdev->asic_prop.high_pll;
/* force setting to low frequency */
- atomic_set(&hdev->curr_pll_profile, PLL_LOW);
+ hdev->curr_pll_profile = PLL_LOW;
if (hdev->pm_mng_profile == PM_AUTO)
hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
@@ -381,44 +461,128 @@ static void device_late_fini(struct hl_device *hdev)
hdev->late_init_done = false;
}
+uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms)
+{
+ struct hl_device_idle_busy_ts *ts;
+ ktime_t zero_ktime, curr = ktime_get();
+ u32 overlap_cnt = 0, last_index = hdev->idle_busy_ts_idx;
+ s64 period_us, last_start_us, last_end_us, last_busy_time_us,
+ total_busy_time_us = 0, total_busy_time_ms;
+
+ zero_ktime = ktime_set(0, 0);
+ period_us = period_ms * USEC_PER_MSEC;
+ ts = &hdev->idle_busy_ts_arr[last_index];
+
+ /* Check the case where the device is currently idle */
+ if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime) &&
+ !ktime_compare(ts->idle_to_busy_ts, zero_ktime)) {
+
+ last_index--;
+ /* Handle the case where idle_busy_ts_idx was 0 */
+ if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
+ last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;
+
+ ts = &hdev->idle_busy_ts_arr[last_index];
+ }
+
+ while (overlap_cnt < HL_IDLE_BUSY_TS_ARR_SIZE) {
+ /* Check if we are in the last-sample case, i.e. if the sample
+ * began before the sampling period. This could be a real
+ * sample or 0, so we need to handle both cases
+ */
+ last_start_us = ktime_to_us(
+ ktime_sub(curr, ts->idle_to_busy_ts));
+
+ if (last_start_us > period_us) {
+
+ /* First check two cases:
+ * 1. If the device is currently busy
+ * 2. If the device was idle during the whole sampling
+ * period
+ */
+
+ if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime)) {
+ /* Check if the device is currently busy */
+ if (ktime_compare(ts->idle_to_busy_ts,
+ zero_ktime))
+ return 100;
+
+ /* We either didn't have any activity or we
+ * reached an entry which is 0. Either way,
+ * exit and return what was accumulated so far
+ */
+ break;
+ }
+
+ /* If sample has finished, check it is relevant */
+ last_end_us = ktime_to_us(
+ ktime_sub(curr, ts->busy_to_idle_ts));
+
+ if (last_end_us > period_us)
+ break;
+
+ /* It is relevant so add it but with adjustment */
+ last_busy_time_us = ktime_to_us(
+ ktime_sub(ts->busy_to_idle_ts,
+ ts->idle_to_busy_ts));
+ total_busy_time_us += last_busy_time_us -
+ (last_start_us - period_us);
+ break;
+ }
+
+ /* Check if the sample is finished or still open */
+ if (ktime_compare(ts->busy_to_idle_ts, zero_ktime))
+ last_busy_time_us = ktime_to_us(
+ ktime_sub(ts->busy_to_idle_ts,
+ ts->idle_to_busy_ts));
+ else
+ last_busy_time_us = ktime_to_us(
+ ktime_sub(curr, ts->idle_to_busy_ts));
+
+ total_busy_time_us += last_busy_time_us;
+
+ last_index--;
+ /* Handle the case where idle_busy_ts_idx was 0 */
+ if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
+ last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;
+
+ ts = &hdev->idle_busy_ts_arr[last_index];
+
+ overlap_cnt++;
+ }
+
+ total_busy_time_ms = DIV_ROUND_UP_ULL(total_busy_time_us,
+ USEC_PER_MSEC);
+
+ return DIV_ROUND_UP_ULL(total_busy_time_ms * 100, period_ms);
+}
+
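As a quick check of the arithmetic in hl_device_utilization(): 300ms of accumulated busy time over a 1000ms window rounds to 300ms, and the function returns DIV_ROUND_UP(300 * 100, 1000) = 30, i.e. 30 percent. A usage sketch:

	/* Report utilization over the last second (sketch) */
	u32 util = hl_device_utilization(hdev, 1000);

	dev_dbg(hdev->dev, "busy %u%% of the last 1000ms\n", util);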
/*
* hl_device_set_frequency - set the frequency of the device
*
* @hdev: pointer to habanalabs device structure
* @freq: the new frequency value
*
- * Change the frequency if needed.
- * We allose to set PLL to low only if there is no user process
- * Returns 0 if no change was done, otherwise returns 1;
+ * Change the frequency if needed. This function has no concurrency
+ * protection, so the caller is assumed to serialize calls to it,
+ * especially concurrent calls with different values
+ *
+ * Returns 0 if no change was done, otherwise returns 1
*/
int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
{
- enum hl_pll_frequency old_freq =
- (freq == PLL_HIGH) ? PLL_LOW : PLL_HIGH;
- int ret;
-
- if (hdev->pm_mng_profile == PM_MANUAL)
- return 0;
-
- ret = atomic_cmpxchg(&hdev->curr_pll_profile, old_freq, freq);
- if (ret == freq)
+ if ((hdev->pm_mng_profile == PM_MANUAL) ||
+ (hdev->curr_pll_profile == freq))
return 0;
- /*
- * in case we want to lower frequency, check if device is not
- * opened. We must have a check here to workaround race condition with
- * hl_device_open
- */
- if ((freq == PLL_LOW) && (atomic_read(&hdev->fd_open_cnt) > 0)) {
- atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
- return 0;
- }
-
dev_dbg(hdev->dev, "Changing device frequency to %s\n",
freq == PLL_HIGH ? "high" : "low");
hdev->asic_funcs->set_pll_profile(hdev, freq);
+ hdev->curr_pll_profile = freq;
+
return 1;
}
@@ -449,19 +613,8 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
goto out;
}
- mutex_lock(&hdev->fd_open_cnt_lock);
-
- if (atomic_read(&hdev->fd_open_cnt) > 1) {
- dev_err(hdev->dev,
- "Failed to enable debug mode. More then a single user is using the device\n");
- rc = -EPERM;
- goto unlock_fd_open_lock;
- }
-
hdev->in_debug = 1;
-unlock_fd_open_lock:
- mutex_unlock(&hdev->fd_open_cnt_lock);
out:
mutex_unlock(&hdev->debug_lock);
@@ -568,6 +721,7 @@ disable_device:
static void device_kill_open_processes(struct hl_device *hdev)
{
u16 pending_total, pending_cnt;
+ struct hl_fpriv *hpriv;
struct task_struct *task = NULL;
if (hdev->pldm)
@@ -575,32 +729,31 @@ static void device_kill_open_processes(struct hl_device *hdev)
else
pending_total = HL_PENDING_RESET_PER_SEC;
- pending_cnt = pending_total;
-
- /* Flush all processes that are inside hl_open */
- mutex_lock(&hdev->fd_open_cnt_lock);
-
- while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
-
- pending_cnt--;
-
- dev_info(hdev->dev,
- "Can't HARD reset, waiting for user to close FD\n");
+ /* Give the user time to close the FD, and processes that are inside
+ * hl_device_open time to finish
+ */
+ if (!list_empty(&hdev->fpriv_list))
ssleep(1);
- }
- if (atomic_read(&hdev->fd_open_cnt)) {
- task = get_pid_task(hdev->user_ctx->hpriv->taskpid,
- PIDTYPE_PID);
+ mutex_lock(&hdev->fpriv_list_lock);
+
+ /* This section must be protected because we are dereferencing
+ * pointers that are freed if the process exits
+ */
+ list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
+ task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
if (task) {
- dev_info(hdev->dev, "Killing user processes\n");
+ dev_info(hdev->dev, "Killing user process pid=%d\n",
+ task_pid_nr(task));
send_sig(SIGKILL, task, 1);
- msleep(100);
+ usleep_range(1000, 10000);
put_task_struct(task);
}
}
+ mutex_unlock(&hdev->fpriv_list_lock);
+
/* We killed the open users, but because the driver cleans up after the
* user contexts are closed (e.g. mmu mappings), we need to wait again
* to make sure the cleaning phase is finished before continuing with
@@ -609,19 +762,18 @@ static void device_kill_open_processes(struct hl_device *hdev)
pending_cnt = pending_total;
- while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+ while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) {
+ dev_info(hdev->dev,
+ "Waiting for all unmap operations to finish before hard reset\n");
pending_cnt--;
ssleep(1);
}
- if (atomic_read(&hdev->fd_open_cnt))
+ if (!list_empty(&hdev->fpriv_list))
dev_crit(hdev->dev,
"Going to hard reset with open user contexts\n");
-
- mutex_unlock(&hdev->fd_open_cnt_lock);
-
}
static void device_hard_reset_pending(struct work_struct *work)
@@ -630,8 +782,6 @@ static void device_hard_reset_pending(struct work_struct *work)
container_of(work, struct hl_device_reset_work, reset_work);
struct hl_device *hdev = device_reset_work->hdev;
- device_kill_open_processes(hdev);
-
hl_device_reset(hdev, true, true);
kfree(device_reset_work);
@@ -679,13 +829,16 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
- /*
- * Flush anyone that is inside the critical section of enqueue
+ /* Flush anyone that is inside the critical section of enqueue
* jobs to the H/W
*/
hdev->asic_funcs->hw_queues_lock(hdev);
hdev->asic_funcs->hw_queues_unlock(hdev);
+ /* Flush anyone that is inside device open */
+ mutex_lock(&hdev->fpriv_list_lock);
+ mutex_unlock(&hdev->fpriv_list_lock);
+
dev_err(hdev->dev, "Going to RESET device!\n");
}
@@ -736,6 +889,13 @@ again:
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev);
+ /* Kill processes here after CS rollback. This is because the process
+ * can't really exit until all its CSs are done, and CS rollback is
+ * what completes them
+ */
+ if (from_hard_reset_thread)
+ device_kill_open_processes(hdev);
+
/* Release kernel context */
if ((hard_reset) && (hl_ctx_put(hdev->kernel_ctx) == 1))
hdev->kernel_ctx = NULL;
@@ -754,12 +914,24 @@ again:
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_cq_reset(hdev, &hdev->completion_queue[i]);
+ hdev->idle_busy_ts_idx = 0;
+ hdev->idle_busy_ts_arr[0].busy_to_idle_ts = ktime_set(0, 0);
+ hdev->idle_busy_ts_arr[0].idle_to_busy_ts = ktime_set(0, 0);
+
+ if (hdev->cs_active_cnt)
+ dev_crit(hdev->dev, "CS active cnt %d is not 0 during reset\n",
+ hdev->cs_active_cnt);
+
+ mutex_lock(&hdev->fpriv_list_lock);
+
/* Make sure the context switch phase will run again */
- if (hdev->user_ctx) {
- atomic_set(&hdev->user_ctx->thread_ctx_switch_token, 1);
- hdev->user_ctx->thread_ctx_switch_wait_token = 0;
+ if (hdev->compute_ctx) {
+ atomic_set(&hdev->compute_ctx->thread_ctx_switch_token, 1);
+ hdev->compute_ctx->thread_ctx_switch_wait_token = 0;
}
+ mutex_unlock(&hdev->fpriv_list_lock);
+
/* Finished tear-down, starting to re-initialize */
if (hard_reset) {
@@ -788,7 +960,7 @@ again:
goto out_err;
}
- hdev->user_ctx = NULL;
+ hdev->compute_ctx = NULL;
rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
if (rc) {
@@ -849,6 +1021,8 @@ again:
else
hdev->soft_reset_cnt++;
+ dev_warn(hdev->dev, "Successfully finished resetting the device\n");
+
return 0;
out_err:
@@ -883,17 +1057,43 @@ out_err:
int hl_device_init(struct hl_device *hdev, struct class *hclass)
{
int i, rc, cq_ready_cnt;
+ char *name;
+ bool add_cdev_sysfs_on_err = false;
- /* Create device */
- rc = device_setup_cdev(hdev, hclass, hdev->id, &hl_ops);
+ name = kasprintf(GFP_KERNEL, "hl%d", hdev->id / 2);
+ if (!name) {
+ rc = -ENOMEM;
+ goto out_disabled;
+ }
+
+ /* Initialize cdev and device structures */
+ rc = device_init_cdev(hdev, hclass, hdev->id, &hl_ops, name,
+ &hdev->cdev, &hdev->dev);
+
+ kfree(name);
if (rc)
goto out_disabled;
+ name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->id / 2);
+ if (!name) {
+ rc = -ENOMEM;
+ goto free_dev;
+ }
+
+ /* Initialize cdev and device structures for control device */
+ rc = device_init_cdev(hdev, hclass, hdev->id_control, &hl_ctrl_ops,
+ name, &hdev->cdev_ctrl, &hdev->dev_ctrl);
+
+ kfree(name);
+
+ if (rc)
+ goto free_dev;
+
/* Initialize ASIC function pointers and perform early init */
rc = device_early_init(hdev);
if (rc)
- goto release_device;
+ goto free_dev_ctrl;
/*
* Start calling ASIC initialization. First S/W then H/W and finally
@@ -965,7 +1165,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto mmu_fini;
}
- hdev->user_ctx = NULL;
+ hdev->compute_ctx = NULL;
rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
if (rc) {
@@ -980,12 +1180,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto release_ctx;
}
- rc = hl_sysfs_init(hdev);
- if (rc) {
- dev_err(hdev->dev, "failed to initialize sysfs\n");
- goto free_cb_pool;
- }
-
hl_debugfs_add_device(hdev);
if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
@@ -994,6 +1188,12 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
hdev->asic_funcs->hw_fini(hdev, true);
}
+ /*
+ * From this point, in case of an error, add char devices and create
+ * sysfs nodes as part of the error flow, to allow debugging.
+ */
+ add_cdev_sysfs_on_err = true;
+
rc = hdev->asic_funcs->hw_init(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize the H/W\n");
@@ -1030,9 +1230,24 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
}
/*
- * hl_hwmon_init must be called after device_late_init, because only
+ * Expose devices and sysfs nodes to user.
+ * From here there is no need to add char devices and create sysfs nodes
+ * in case of an error.
+ */
+ add_cdev_sysfs_on_err = false;
+ rc = device_cdev_sysfs_add(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to add char devices and sysfs nodes\n");
+ rc = 0;
+ goto out_disabled;
+ }
+
+ /*
+ * hl_hwmon_init() must be called after device_late_init(), because only
* there we get the information from the device about which
- * hwmon-related sensors the device supports
+ * hwmon-related sensors the device supports.
+ * Furthermore, it must be done after adding the device to the system.
*/
rc = hl_hwmon_init(hdev);
if (rc) {
@@ -1048,8 +1263,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
return 0;
-free_cb_pool:
- hl_cb_pool_fini(hdev);
release_ctx:
if (hl_ctx_put(hdev->kernel_ctx) != 1)
dev_err(hdev->dev,
@@ -1068,18 +1281,21 @@ sw_fini:
hdev->asic_funcs->sw_fini(hdev);
early_fini:
device_early_fini(hdev);
-release_device:
- device_destroy(hclass, hdev->dev->devt);
- cdev_del(&hdev->cdev);
+free_dev_ctrl:
+ kfree(hdev->dev_ctrl);
+free_dev:
+ kfree(hdev->dev);
out_disabled:
hdev->disabled = true;
+ if (add_cdev_sysfs_on_err)
+ device_cdev_sysfs_add(hdev);
if (hdev->pdev)
dev_err(&hdev->pdev->dev,
"Failed to initialize hl%d. Device is NOT usable !\n",
- hdev->id);
+ hdev->id / 2);
else
pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
- hdev->id);
+ hdev->id / 2);
return rc;
}
@@ -1120,16 +1336,17 @@ void hl_device_fini(struct hl_device *hdev)
/* Mark device as disabled */
hdev->disabled = true;
- /*
- * Flush anyone that is inside the critical section of enqueue
+ /* Flush anyone that is inside the critical section of enqueue
* jobs to the H/W
*/
hdev->asic_funcs->hw_queues_lock(hdev);
hdev->asic_funcs->hw_queues_unlock(hdev);
- hdev->hard_reset_pending = true;
+ /* Flush anyone that is inside device open */
+ mutex_lock(&hdev->fpriv_list_lock);
+ mutex_unlock(&hdev->fpriv_list_lock);
- device_kill_open_processes(hdev);
+ hdev->hard_reset_pending = true;
hl_hwmon_fini(hdev);
@@ -1137,8 +1354,6 @@ void hl_device_fini(struct hl_device *hdev)
hl_debugfs_remove_device(hdev);
- hl_sysfs_fini(hdev);
-
/*
* Halt the engines and disable interrupts so we won't get any more
* completions from H/W and we won't have any accesses from the
@@ -1149,6 +1364,12 @@ void hl_device_fini(struct hl_device *hdev)
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev);
+ /* Kill processes here after CS rollback. This is because the process
+ * can't really exit until all its CSs are done, and CS rollback is
+ * what completes them
+ */
+ device_kill_open_processes(hdev);
+
hl_cb_pool_fini(hdev);
/* Release kernel context */
@@ -1175,9 +1396,8 @@ void hl_device_fini(struct hl_device *hdev)
device_early_fini(hdev);
- /* Hide device from user */
- device_destroy(hdev->dev->class, hdev->dev->devt);
- cdev_del(&hdev->cdev);
+ /* Hide devices and sysfs nodes from user */
+ device_cdev_sysfs_del(hdev);
pr_info("removed device successfully\n");
}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 271c5c8f53b4..6fba14b81f90 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -9,6 +9,7 @@
#include "include/hw_ip/mmu/mmu_general.h"
#include "include/hw_ip/mmu/mmu_v1_0.h"
#include "include/goya/asic_reg/goya_masks.h"
+#include "include/goya/goya_reg_map.h"
#include <linux/pci.h>
#include <linux/genalloc.h>
@@ -41,8 +42,8 @@
* PQ, CQ and CP are not secured.
* PQ, CB and the data are on the SRAM/DRAM.
*
- * Since QMAN DMA is secured, KMD is parsing the DMA CB:
- * - KMD checks DMA pointer
+ * Since QMAN DMA is secured, the driver is parsing the DMA CB:
+ * - checks DMA pointer
* - WREG, MSG_PROT are not allowed.
* - MSG_LONG/SHORT are allowed.
*
@@ -55,15 +56,15 @@
* QMAN DMA: PQ, CQ and CP are secured.
* MMU is set to bypass on the Secure props register of the QMAN.
* The reasons we don't enable MMU for PQ, CQ and CP are:
- * - PQ entry is in kernel address space and KMD doesn't map it.
+ * - PQ entry is in kernel address space and the driver doesn't map it.
* - CP writes to MSIX register and to kernel address space (completion
* queue).
*
- * DMA is not secured but because CP is secured, KMD still needs to parse the
- * CB, but doesn't need to check the DMA addresses.
+ * DMA is not secured but because CP is secured, the driver still needs to parse
+ * the CB, but doesn't need to check the DMA addresses.
*
- * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD
- * doesn't map memory in MMU.
+ * For QMAN DMA 0, DMA is also secured because only the driver uses this DMA and
+ * the driver doesn't map memory in MMU.
*
* QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode)
*
@@ -335,18 +336,18 @@ void goya_get_fixed_properties(struct hl_device *hdev)
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
- prop->hw_queues_props[i].kmd_only = 0;
+ prop->hw_queues_props[i].driver_only = 0;
}
for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
- prop->hw_queues_props[i].kmd_only = 1;
+ prop->hw_queues_props[i].driver_only = 1;
}
for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
NUMBER_OF_INT_HW_QUEUES; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
- prop->hw_queues_props[i].kmd_only = 0;
+ prop->hw_queues_props[i].driver_only = 0;
}
for (; i < HL_MAX_QUEUES; i++)
@@ -1006,36 +1007,34 @@ int goya_init_cpu_queues(struct hl_device *hdev)
eq = &hdev->event_queue;
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0,
- lower_32_bits(cpu_pq->bus_address));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1,
- upper_32_bits(cpu_pq->bus_address));
+ WREG32(mmCPU_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
+ WREG32(mmCPU_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(eq->bus_address));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(eq->bus_address));
+ WREG32(mmCPU_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
+ WREG32(mmCPU_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8,
+ WREG32(mmCPU_CQ_BASE_ADDR_LOW,
lower_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9,
+ WREG32(mmCPU_CQ_BASE_ADDR_HIGH,
upper_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, HL_CPU_ACCESSIBLE_MEM_SIZE);
+ WREG32(mmCPU_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
+ WREG32(mmCPU_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
+ WREG32(mmCPU_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);
/* Used for EQ CI */
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);
+ WREG32(mmCPU_EQ_CI, 0);
WREG32(mmCPU_IF_PF_PQ_PI, 0);
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP);
+ WREG32(mmCPU_PQ_INIT_STATUS, PQ_INIT_STATUS_READY_FOR_CP);
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
GOYA_ASYNC_EVENT_ID_PI_UPDATE);
err = hl_poll_timeout(
hdev,
- mmPSOC_GLOBAL_CONF_SCRATCHPAD_7,
+ mmCPU_PQ_INIT_STATUS,
status,
(status == PQ_INIT_STATUS_READY_FOR_HOST),
1000,
@@ -2063,6 +2062,25 @@ static void goya_disable_msix(struct hl_device *hdev)
goya->hw_cap_initialized &= ~HW_CAP_MSIX;
}
+static void goya_enable_timestamp(struct hl_device *hdev)
+{
+ /* Disable the timestamp counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
+
+ /* Zero the lower/upper parts of the 64-bit counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
+
+ /* Enable the counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
+}
+
+static void goya_disable_timestamp(struct hl_device *hdev)
+{
+ /* Disable the timestamp counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
+}
+
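The raw offsets written here match the ARM generic timestamp generator layout; the macro names below are hypothetical, added only to document the magic numbers (assumption: CNTCR at +0x0, CNTCV low/high at +0x8/+0xC):

	#define TS_CNTCR	0x0	/* control: bit 0 enables counter */
	#define TS_CNTCV_L	0x8	/* counter value, lower 32 bits */
	#define TS_CNTCV_H	0xC	/* counter value, upper 32 bits */

	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + TS_CNTCR, 0);   /* halt */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + TS_CNTCV_H, 0); /* zero */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + TS_CNTCV_L, 0);
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + TS_CNTCR, 1);   /* run */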
static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
{
u32 wait_timeout_ms, cpu_timeout_ms;
@@ -2103,6 +2121,8 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
goya_disable_external_queues(hdev);
goya_disable_internal_queues(hdev);
+ goya_disable_timestamp(hdev);
+
if (hard_reset) {
goya_disable_msix(hdev);
goya_mmu_remove_device_cpu_mappings(hdev);
@@ -2205,12 +2225,12 @@ static void goya_read_device_fw_version(struct hl_device *hdev,
switch (fwc) {
case FW_COMP_UBOOT:
- ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29);
+ ver_off = RREG32(mmUBOOT_VER_OFFSET);
dest = hdev->asic_prop.uboot_ver;
name = "U-Boot";
break;
case FW_COMP_PREBOOT:
- ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28);
+ ver_off = RREG32(mmPREBOOT_VER_OFFSET);
dest = hdev->asic_prop.preboot_ver;
name = "Preboot";
break;
@@ -2469,7 +2489,7 @@ static int goya_hw_init(struct hl_device *hdev)
* we need to reset the chip before doing H/W init. This register is
* cleared by the H/W upon H/W reset
*/
- WREG32(mmPSOC_GLOBAL_CONF_APP_STATUS, HL_DEVICE_HW_STATE_DIRTY);
+ WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
if (rc) {
@@ -2505,6 +2525,8 @@ static int goya_hw_init(struct hl_device *hdev)
goya_init_tpc_qmans(hdev);
+ goya_enable_timestamp(hdev);
+
/* MSI-X must be enabled before CPU queues are initialized */
rc = goya_enable_msix(hdev);
if (rc)
@@ -2831,7 +2853,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) {
dev_err_ratelimited(hdev->dev,
- "Can't send KMD job on QMAN0 because the device is not idle\n");
+ "Can't send driver job on QMAN0 because the device is not idle\n");
return -EBUSY;
}
@@ -3949,7 +3971,7 @@ void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
void goya_update_eq_ci(struct hl_device *hdev, u32 val)
{
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
+ WREG32(mmCPU_EQ_CI, val);
}
void goya_restore_phase_topology(struct hl_device *hdev)
@@ -4447,6 +4469,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
struct goya_device *goya = hdev->asic_specific;
goya->events_stat[event_type]++;
+ goya->events_stat_aggregate[event_type]++;
switch (event_type) {
case GOYA_ASYNC_EVENT_ID_PCIE_IF:
@@ -4528,12 +4551,16 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
}
}
-void *goya_get_events_stat(struct hl_device *hdev, u32 *size)
+void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)
{
struct goya_device *goya = hdev->asic_specific;
- *size = (u32) sizeof(goya->events_stat);
+ if (aggregate) {
+ *size = (u32) sizeof(goya->events_stat_aggregate);
+ return goya->events_stat_aggregate;
+ }
+ *size = (u32) sizeof(goya->events_stat);
return goya->events_stat;
}
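Callers select between the per-reset and the aggregate counters through the updated ASIC hook; a usage sketch:

	u32 size;
	void *stat;

	/* Counters accumulated since driver load (survive resets) */
	stat = hdev->asic_funcs->get_events_stat(hdev, true, &size);

	/* Counters since the last reset */
	stat = hdev->asic_funcs->get_events_stat(hdev, false, &size);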
@@ -4934,6 +4961,10 @@ int goya_armcp_info_get(struct hl_device *hdev)
prop->dram_end_address = prop->dram_base_address + dram_size;
}
+ if (!strlen(prop->armcp_info.card_name))
+ strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
+ CARD_NAME_MAX_LEN);
+
return 0;
}
@@ -5047,7 +5078,7 @@ static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
{
- return RREG32(mmPSOC_GLOBAL_CONF_APP_STATUS);
+ return RREG32(mmHW_STATE);
}
static const struct hl_asic_funcs goya_funcs = {
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index d7f48c9c41cd..89b6574f8e4f 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -55,6 +55,8 @@
#define DRAM_PHYS_DEFAULT_SIZE 0x100000000ull /* 4GB */
+#define GOYA_DEFAULT_CARD_NAME "HL1000"
+
/* DRAM Memory Map */
#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
@@ -68,19 +70,19 @@
MMU_PAGE_TABLES_SIZE)
#define MMU_CACHE_MNG_ADDR (MMU_DRAM_DEFAULT_PAGE_ADDR + \
MMU_DRAM_DEFAULT_PAGE_SIZE)
-#define DRAM_KMD_END_ADDR (MMU_CACHE_MNG_ADDR + \
+#define DRAM_DRIVER_END_ADDR (MMU_CACHE_MNG_ADDR + \
MMU_CACHE_MNG_SIZE)
#define DRAM_BASE_ADDR_USER 0x20000000
-#if (DRAM_KMD_END_ADDR > DRAM_BASE_ADDR_USER)
-#error "KMD must reserve no more than 512MB"
+#if (DRAM_DRIVER_END_ADDR > DRAM_BASE_ADDR_USER)
+#error "Driver must reserve no more than 512MB"
#endif
/*
- * SRAM Memory Map for KMD
+ * SRAM Memory Map for Driver
*
- * KMD occupies KMD_SRAM_SIZE bytes from the start of SRAM. It is used for
+ * Driver occupies DRIVER_SRAM_SIZE bytes from the start of SRAM. It is used for
* MME/TPC QMANs
*
*/
@@ -106,10 +108,10 @@
#define TPC7_QMAN_BASE_OFFSET (TPC6_QMAN_BASE_OFFSET + \
(TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
-#define SRAM_KMD_RES_OFFSET (TPC7_QMAN_BASE_OFFSET + \
+#define SRAM_DRIVER_RES_OFFSET (TPC7_QMAN_BASE_OFFSET + \
(TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
-#if (SRAM_KMD_RES_OFFSET >= GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START)
+#if (SRAM_DRIVER_RES_OFFSET >= GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START)
#error "MME/TPC QMANs SRAM space exceeds limit"
#endif
@@ -162,6 +164,7 @@ struct goya_device {
u64 ddr_bar_cur_addr;
u32 events_stat[GOYA_ASYNC_EVENT_ID_SIZE];
+ u32 events_stat_aggregate[GOYA_ASYNC_EVENT_ID_SIZE];
u32 hw_cap_initialized;
u8 device_cpu_mmu_mappings_done;
};
@@ -215,7 +218,7 @@ int goya_suspend(struct hl_device *hdev);
int goya_resume(struct hl_device *hdev);
void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry);
-void *goya_get_events_stat(struct hl_device *hdev, u32 *size);
+void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size);
void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec);
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index d7ec7ad84cc6..b4d406af1bed 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -15,6 +15,10 @@
#define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 100)
+#define SPMU_SECTION_SIZE DMA_CH_0_CS_SPMU_MAX_OFFSET
+#define SPMU_EVENT_TYPES_OFFSET 0x400
+#define SPMU_MAX_COUNTERS 6
+
static u64 debug_stm_regs[GOYA_STM_LAST + 1] = {
[GOYA_STM_CPU] = mmCPU_STM_BASE,
[GOYA_STM_DMA_CH_0_CS] = mmDMA_CH_0_CS_STM_BASE,
@@ -226,9 +230,16 @@ static int goya_config_stm(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_stm *input;
- u64 base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE;
+ u64 base_reg;
int rc;
+ if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
+ dev_err(hdev->dev, "Invalid register index in STM\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE;
+
WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
if (params->enable) {
@@ -288,10 +299,17 @@ static int goya_config_etf(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_etf *input;
- u64 base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE;
+ u64 base_reg;
u32 val;
int rc;
+ if (params->reg_idx >= ARRAY_SIZE(debug_etf_regs)) {
+ dev_err(hdev->dev, "Invalid register index in ETF\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE;
+
WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
val = RREG32(base_reg + 0x304);
@@ -445,11 +463,18 @@ static int goya_config_etr(struct hl_device *hdev,
static int goya_config_funnel(struct hl_device *hdev,
struct hl_debug_params *params)
{
- WREG32(debug_funnel_regs[params->reg_idx] - CFG_BASE + 0xFB0,
- CORESIGHT_UNLOCK);
+ u64 base_reg;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_funnel_regs)) {
+ dev_err(hdev->dev, "Invalid register index in FUNNEL\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_funnel_regs[params->reg_idx] - CFG_BASE;
+
+ WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
- WREG32(debug_funnel_regs[params->reg_idx] - CFG_BASE,
- params->enable ? 0x33F : 0);
+ WREG32(base_reg, params->enable ? 0x33F : 0);
return 0;
}
@@ -458,9 +483,16 @@ static int goya_config_bmon(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_bmon *input;
- u64 base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE;
+ u64 base_reg;
u32 pcie_base = 0;
+ if (params->reg_idx >= ARRAY_SIZE(debug_bmon_regs)) {
+ dev_err(hdev->dev, "Invalid register index in BMON\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE;
+
WREG32(base_reg + 0x104, 1);
if (params->enable) {
@@ -522,7 +554,7 @@ static int goya_config_bmon(struct hl_device *hdev,
static int goya_config_spmu(struct hl_device *hdev,
struct hl_debug_params *params)
{
- u64 base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
+ u64 base_reg;
struct hl_debug_params_spmu *input = params->input;
u64 *output;
u32 output_arr_len;
@@ -531,6 +563,13 @@ static int goya_config_spmu(struct hl_device *hdev,
u32 cycle_cnt_idx;
int i;
+ if (params->reg_idx >= ARRAY_SIZE(debug_spmu_regs)) {
+ dev_err(hdev->dev, "Invalid register index in SPMU\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
+
if (params->enable) {
input = params->input;
@@ -539,7 +578,13 @@ static int goya_config_spmu(struct hl_device *hdev,
if (input->event_types_num < 3) {
dev_err(hdev->dev,
- "not enough values for SPMU enable\n");
+ "not enough event types values for SPMU enable\n");
+ return -EINVAL;
+ }
+
+ if (input->event_types_num > SPMU_MAX_COUNTERS) {
+ dev_err(hdev->dev,
+ "too many event types values for SPMU enable\n");
return -EINVAL;
}
@@ -547,7 +592,8 @@ static int goya_config_spmu(struct hl_device *hdev,
WREG32(base_reg + 0xE04, 0x41013040);
for (i = 0 ; i < input->event_types_num ; i++)
- WREG32(base_reg + 0x400 + i * 4, input->event_types[i]);
+ WREG32(base_reg + SPMU_EVENT_TYPES_OFFSET + i * 4,
+ input->event_types[i]);
WREG32(base_reg + 0xE04, 0x41013041);
WREG32(base_reg + 0xC00, 0x8000003F);
@@ -567,6 +613,12 @@ static int goya_config_spmu(struct hl_device *hdev,
return -EINVAL;
}
+ if (events_num > SPMU_MAX_COUNTERS) {
+ dev_err(hdev->dev,
+ "too many events values for SPMU disable\n");
+ return -EINVAL;
+ }
+
WREG32(base_reg + 0xE04, 0x41013040);
for (i = 0 ; i < events_num ; i++)
@@ -584,24 +636,11 @@ static int goya_config_spmu(struct hl_device *hdev,
return 0;
}
-static int goya_config_timestamp(struct hl_device *hdev,
- struct hl_debug_params *params)
-{
- WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
- if (params->enable) {
- WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
- WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
- WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
- }
-
- return 0;
-}
-
int goya_debug_coresight(struct hl_device *hdev, void *data)
{
struct hl_debug_params *params = data;
u32 val;
- int rc;
+ int rc = 0;
switch (params->op) {
case HL_DEBUG_OP_STM:
@@ -623,7 +662,7 @@ int goya_debug_coresight(struct hl_device *hdev, void *data)
rc = goya_config_spmu(hdev, params);
break;
case HL_DEBUG_OP_TIMESTAMP:
- rc = goya_config_timestamp(hdev, params);
+ /* Do nothing as this opcode is deprecated */
break;
default:
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index 088692c852b6..a2a700c3d597 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -230,18 +230,127 @@ static ssize_t ic_clk_curr_show(struct device *dev,
return sprintf(buf, "%lu\n", value);
}
+static ssize_t pm_mng_profile_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ return sprintf(buf, "%s\n",
+ (hdev->pm_mng_profile == PM_AUTO) ? "auto" :
+ (hdev->pm_mng_profile == PM_MANUAL) ? "manual" :
+ "unknown");
+}
+
+static ssize_t pm_mng_profile_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto out;
+ }
+
+ mutex_lock(&hdev->fpriv_list_lock);
+
+ if (hdev->compute_ctx) {
+ dev_err(hdev->dev,
+ "Can't change PM profile while compute context is opened on the device\n");
+ count = -EPERM;
+ goto unlock_mutex;
+ }
+
+ if (strncmp("auto", buf, strlen("auto")) == 0) {
+ /* Make sure we are in LOW PLL when changing modes */
+ if (hdev->pm_mng_profile == PM_MANUAL) {
+ hdev->curr_pll_profile = PLL_HIGH;
+ hl_device_set_frequency(hdev, PLL_LOW);
+ hdev->pm_mng_profile = PM_AUTO;
+ }
+ } else if (strncmp("manual", buf, strlen("manual")) == 0) {
+ if (hdev->pm_mng_profile == PM_AUTO) {
+ /* Must release the lock because the work thread also
+ * takes this lock. But before we release it, set
+ * the mode to manual so nothing will change if a user
+ * suddenly opens the device
+ */
+ hdev->pm_mng_profile = PM_MANUAL;
+
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ /* Flush the current work so we can return to the user
+ * knowing that he is the only one changing frequencies
+ */
+ flush_delayed_work(&hdev->work_freq);
+
+ return count;
+ }
+ } else {
+ dev_err(hdev->dev, "value should be auto or manual\n");
+ count = -EINVAL;
+ }
+
+unlock_mutex:
+ mutex_unlock(&hdev->fpriv_list_lock);
+out:
+ return count;
+}
+
+static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ return sprintf(buf, "%u\n", hdev->high_pll);
+}
+
+static ssize_t high_pll_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto out;
+ }
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto out;
+ }
+
+ hdev->high_pll = value;
+
+out:
+ return count;
+}
+
+static DEVICE_ATTR_RW(high_pll);
static DEVICE_ATTR_RW(ic_clk);
static DEVICE_ATTR_RO(ic_clk_curr);
static DEVICE_ATTR_RW(mme_clk);
static DEVICE_ATTR_RO(mme_clk_curr);
+static DEVICE_ATTR_RW(pm_mng_profile);
static DEVICE_ATTR_RW(tpc_clk);
static DEVICE_ATTR_RO(tpc_clk_curr);
static struct attribute *goya_dev_attrs[] = {
+ &dev_attr_high_pll.attr,
&dev_attr_ic_clk.attr,
&dev_attr_ic_clk_curr.attr,
&dev_attr_mme_clk.attr,
&dev_attr_mme_clk_curr.attr,
+ &dev_attr_pm_mng_profile.attr,
&dev_attr_tpc_clk.attr,
&dev_attr_tpc_clk_curr.attr,
NULL,
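The new pm_mng_profile node accepts the strings "auto" and "manual". A user-space sketch, assuming the node appears under /sys/class/habanalabs/hl0/:

	#include <fcntl.h>
	#include <unistd.h>

	/* Switch the PM profile to manual; returns 0 on success.
	 * The sysfs path is an assumption based on the class/device
	 * names used by the driver. */
	static int hl_set_manual_pm(void)
	{
		int fd = open("/sys/class/habanalabs/hl0/pm_mng_profile",
			      O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, "manual", 6);
		close(fd);
		return n == 6 ? 0 : -1;
	}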
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index ce83adafcf2d..75862be53c60 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -36,6 +36,8 @@
#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */
+#define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */
+
#define HL_MAX_QUEUES 128
#define HL_MAX_JOBS_PER_CS 64
@@ -43,6 +45,8 @@
/* MUST BE POWER OF 2 and larger than 1 */
#define HL_MAX_PENDING_CS 64
+#define HL_IDLE_BUSY_TS_ARR_SIZE 4096
+
/* Memory */
#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
@@ -92,12 +96,12 @@ enum hl_queue_type {
/**
* struct hw_queue_properties - queue information.
* @type: queue type.
- * @kmd_only: true if only KMD is allowed to send a job to this queue, false
- * otherwise.
+ * @driver_only: true if only the driver is allowed to send a job to this queue,
+ * false otherwise.
*/
struct hw_queue_properties {
enum hl_queue_type type;
- u8 kmd_only;
+ u8 driver_only;
};
/**
@@ -320,7 +324,7 @@ struct hl_cs_job;
#define HL_EQ_LENGTH 64
#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
-/* KMD <-> ArmCP shared memory size */
+/* Host <-> ArmCP shared memory size */
#define HL_CPU_ACCESSIBLE_MEM_SIZE SZ_2M
/**
@@ -401,7 +405,7 @@ struct hl_cs_parser;
/**
* enum hl_pm_mng_profile - power management profile.
- * @PM_AUTO: internal clock is set by KMD.
+ * @PM_AUTO: internal clock is set by the Linux driver.
* @PM_MANUAL: internal clock is set by the user.
* @PM_LAST: last power management type.
*/
@@ -554,7 +558,8 @@ struct hl_asic_funcs {
struct hl_eq_entry *eq_entry);
void (*set_pll_profile)(struct hl_device *hdev,
enum hl_pll_frequency freq);
- void* (*get_events_stat)(struct hl_device *hdev, u32 *size);
+ void* (*get_events_stat)(struct hl_device *hdev, bool aggregate,
+ u32 *size);
u64 (*read_pte)(struct hl_device *hdev, u64 addr);
void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard);
@@ -608,7 +613,7 @@ struct hl_va_range {
* descriptor (hl_vm_phys_pg_list or hl_userptr).
* @mmu_phys_hash: holds a mapping from physical address to pgt_info structure.
* @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
- * @hpriv: pointer to the private (KMD) data of the process (fd).
+ * @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
* @hdev: pointer to the device structure.
* @refcount: reference counter for the context. Context is released only when
* this hits 0. It is incremented on CS and CS_WAIT.
@@ -634,6 +639,7 @@ struct hl_va_range {
* execution phase before the context switch phase
* has finished.
* @asid: context's unique address space ID in the device's MMU.
+ * @handle: context's opaque handle for user
*/
struct hl_ctx {
DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
@@ -655,6 +661,7 @@ struct hl_ctx {
atomic_t thread_ctx_switch_token;
u32 thread_ctx_switch_wait_token;
u32 asid;
+ u32 handle;
};
/**
@@ -906,23 +913,27 @@ struct hl_debug_params {
* @hdev: habanalabs device structure.
* @filp: pointer to the given file structure.
* @taskpid: current process ID.
- * @ctx: current executing context.
+ * @ctx: current executing context. TODO: remove for multiple ctx per process
* @ctx_mgr: context manager to handle multiple context for this FD.
* @cb_mgr: command buffer manager to handle multiple buffers for this FD.
* @debugfs_list: list of relevant ASIC debugfs.
+ * @dev_node: node in the device list of file private data
* @refcount: number of related contexts.
* @restore_phase_mutex: lock for context switch and restore phase.
+ * @is_control: true for control device, false otherwise
*/
struct hl_fpriv {
struct hl_device *hdev;
struct file *filp;
struct pid *taskpid;
- struct hl_ctx *ctx; /* TODO: remove for multiple ctx */
+ struct hl_ctx *ctx;
struct hl_ctx_mgr ctx_mgr;
struct hl_cb_mgr cb_mgr;
struct list_head debugfs_list;
+ struct list_head dev_node;
struct kref refcount;
struct mutex restore_phase_mutex;
+ u8 is_control;
};
@@ -1009,7 +1020,7 @@ struct hl_dbg_device_entry {
*/
/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
- * x16 cards. In extereme cases, there are hosts that can accommodate 16 cards
+ * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
*/
#define HL_MAX_MINORS 256
@@ -1041,14 +1052,18 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
(val) << REG_FIELD_SHIFT(reg, field))
+/* Timeout should be longer when working with simulator but cap the
+ * increased timeout to some maximum
+ */
#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
({ \
ktime_t __timeout; \
- /* timeout should be longer when working with simulator */ \
if (hdev->pdev) \
__timeout = ktime_add_us(ktime_get(), timeout_us); \
else \
- __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \
+ __timeout = ktime_add_us(ktime_get(),\
+ min((u64)(timeout_us * 10), \
+ (u64) HL_SIM_MAX_TIMEOUT_US)); \
might_sleep_if(sleep_us); \
for (;;) { \
(val) = RREG32(addr); \
@@ -1080,24 +1095,25 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
mem_written_by_device) \
({ \
ktime_t __timeout; \
- /* timeout should be longer when working with simulator */ \
if (hdev->pdev) \
__timeout = ktime_add_us(ktime_get(), timeout_us); \
else \
- __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \
+ __timeout = ktime_add_us(ktime_get(),\
+ min((u64)(timeout_us * 10), \
+ (u64) HL_SIM_MAX_TIMEOUT_US)); \
might_sleep_if(sleep_us); \
for (;;) { \
/* Verify we read updates done by other cores or by device */ \
mb(); \
(val) = *((u32 *) (uintptr_t) (addr)); \
if (mem_written_by_device) \
- (val) = le32_to_cpu(val); \
+ (val) = le32_to_cpu(*(__le32 *) &(val)); \
if (cond) \
break; \
if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
(val) = *((u32 *) (uintptr_t) (addr)); \
if (mem_written_by_device) \
- (val) = le32_to_cpu(val); \
+ (val) = le32_to_cpu(*(__le32 *) &(val)); \
break; \
} \
if (sleep_us) \
@@ -1110,11 +1126,12 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
timeout_us) \
({ \
ktime_t __timeout; \
- /* timeout should be longer when working with simulator */ \
if (hdev->pdev) \
__timeout = ktime_add_us(ktime_get(), timeout_us); \
else \
- __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \
+ __timeout = ktime_add_us(ktime_get(),\
+ min((u64)(timeout_us * 10), \
+ (u64) HL_SIM_MAX_TIMEOUT_US)); \
might_sleep_if(sleep_us); \
for (;;) { \
(val) = readl(addr); \
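A worked example of the simulator cap in these macros: a 1s hardware timeout previously stretched to 10s on the simulator and still does, since min(10s, HL_SIM_MAX_TIMEOUT_US) = 10s, while a 2s timeout is now clamped from 20s down to 10s. Usage is unchanged (sketch, reusing the CPU queue poll shown earlier):

	rc = hl_poll_timeout(
		hdev,
		mmCPU_PQ_INIT_STATUS,
		status,
		(status == PQ_INIT_STATUS_READY_FOR_HOST),
		1000,
		2000000);	/* 2s on HW, capped at 10s on simulator */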
@@ -1143,12 +1160,24 @@ struct hl_device_reset_work {
};
/**
+ * struct hl_device_idle_busy_ts - used for calculating device utilization rate.
+ * @idle_to_busy_ts: timestamp where device changed from idle to busy.
+ * @busy_to_idle_ts: timestamp where device changed from busy to idle.
+ */
+struct hl_device_idle_busy_ts {
+ ktime_t idle_to_busy_ts;
+ ktime_t busy_to_idle_ts;
+};
+
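An entry in this array encodes three states, which the utilization code relies on: both timestamps zero (slot unused), idle_to_busy set with busy_to_idle zero (interval still open, device busy), and both set (closed busy interval). A sketch of that predicate as a hypothetical helper:

	/* Hypothetical helper: is this interval still open, i.e. is the
	 * device busy in the slot's time range? */
	static bool hl_ts_entry_open(const struct hl_device_idle_busy_ts *ts)
	{
		ktime_t zero = ktime_set(0, 0);

		return ktime_compare(ts->idle_to_busy_ts, zero) &&
		       !ktime_compare(ts->busy_to_idle_ts, zero);
	}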
+/**
* struct hl_device - habanalabs device structure.
* @pdev: pointer to PCI device, can be NULL in case of simulator device.
* @pcie_bar: array of available PCIe bars.
* @rmmio: configuration area address on SRAM.
* @cdev: related char device.
- * @dev: realted kernel basic device structure.
+ * @cdev_ctrl: char device for control operations only (INFO IOCTL)
+ * @dev: related kernel basic device structure.
+ * @dev_ctrl: related kernel device structure for the control device
* @work_freq: delayed work to lower device frequency if possible.
* @work_heartbeat: delayed work for ArmCP is-alive check.
* @asic_name: ASIC specific name.
@@ -1156,25 +1185,19 @@ struct hl_device_reset_work {
* @completion_queue: array of hl_cq.
* @cq_wq: work queue of completion queues for executing work in process context
* @eq_wq: work queue of event queue for executing work in process context.
- * @kernel_ctx: KMD context structure.
+ * @kernel_ctx: Kernel driver context structure.
* @kernel_queues: array of hl_hw_queue.
* @hw_queues_mirror_list: CS mirror list for TDR.
* @hw_queues_mirror_lock: protects hw_queues_mirror_list.
* @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs.
* @event_queue: event queue for IRQ from ArmCP.
* @dma_pool: DMA pool for small allocations.
- * @cpu_accessible_dma_mem: KMD <-> ArmCP shared memory CPU address.
- * @cpu_accessible_dma_address: KMD <-> ArmCP shared memory DMA address.
- * @cpu_accessible_dma_pool: KMD <-> ArmCP shared memory pool.
+ * @cpu_accessible_dma_mem: Host <-> ArmCP shared memory CPU address.
+ * @cpu_accessible_dma_address: Host <-> ArmCP shared memory DMA address.
+ * @cpu_accessible_dma_pool: Host <-> ArmCP shared memory pool.
* @asid_bitmap: holds used/available ASIDs.
* @asid_mutex: protects asid_bitmap.
- * @fd_open_cnt_lock: lock for updating fd_open_cnt in hl_device_open. Although
- * fd_open_cnt is atomic, we need this lock to serialize
- * the open function because the driver currently supports
- * only a single process at a time. In addition, we need a
- * lock here so we can flush user processes which are opening
- * the device while we are trying to hard reset it
- * @send_cpu_message_lock: enforces only one message in KMD <-> ArmCP queue.
+ * @send_cpu_message_lock: enforces only one message in Host <-> ArmCP queue.
* @debug_lock: protects critical section of setting debug mode for device
* @asic_prop: ASIC specific immutable properties.
* @asic_funcs: ASIC specific functions.
@@ -1189,22 +1212,28 @@ struct hl_device_reset_work {
* @hl_debugfs: device's debugfs manager.
* @cb_pool: list of preallocated CBs.
* @cb_pool_lock: protects the CB pool.
- * @user_ctx: current user context executing.
+ * @fpriv_list: list of file private data structures. Each structure is created
+ * when a user opens the device
+ * @fpriv_list_lock: protects the fpriv_list
+ * @compute_ctx: currently executing compute context.
+ * @idle_busy_ts_arr: array to hold timestamps of transitions from idle to busy
+ * and vice-versa
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
- * value is saved so in case of hard-reset, KMD will restore this
- * value and update the F/W after the re-initialization
+ * value is saved so in case of hard-reset, the driver will restore
+ * this value and update the F/W after the re-initialization
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
- * @fd_open_cnt: number of open user processes.
* @cs_active_cnt: number of active command submissions on this device (active
* means already in H/W queues)
- * @major: habanalabs KMD major.
+ * @major: habanalabs kernel driver major.
* @high_pll: high PLL profile frequency.
- * @soft_reset_cnt: number of soft reset since KMD loading.
- * @hard_reset_cnt: number of hard reset since KMD loading.
+ * @soft_reset_cnt: number of soft reset since the driver was loaded.
+ * @hard_reset_cnt: number of hard reset since the driver was loaded.
+ * @idle_busy_ts_idx: index of current entry in idle_busy_ts_arr
* @id: device minor.
+ * @id_control: minor of the control device
* @disabled: is device disabled.
* @late_init_done: was the late init stage done during initialization.
* @hwmon_initialized: were the H/W monitor sensors initialized.
@@ -1218,15 +1247,18 @@ struct hl_device_reset_work {
* @mmu_enable: is MMU enabled.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
* @dma_mask: the dma mask that was set for this device
- * @in_debug: is device under debug. This, together with fd_open_cnt, enforces
+ * @in_debug: is device under debug. This, together with fpriv_list, enforces
* that only a single user is configuring the debug infrastructure.
+ * @cdev_sysfs_created: were char devices and sysfs nodes created.
*/
struct hl_device {
struct pci_dev *pdev;
void __iomem *pcie_bar[6];
void __iomem *rmmio;
struct cdev cdev;
+ struct cdev cdev_ctrl;
struct device *dev;
+ struct device *dev_ctrl;
struct delayed_work work_freq;
struct delayed_work work_heartbeat;
char asic_name[16];
@@ -1246,8 +1278,6 @@ struct hl_device {
struct gen_pool *cpu_accessible_dma_pool;
unsigned long *asid_bitmap;
struct mutex asid_mutex;
- /* TODO: remove fd_open_cnt_lock for multiple process support */
- struct mutex fd_open_cnt_lock;
struct mutex send_cpu_message_lock;
struct mutex debug_lock;
struct asic_fixed_properties asic_prop;
@@ -1266,21 +1296,26 @@ struct hl_device {
struct list_head cb_pool;
spinlock_t cb_pool_lock;
- /* TODO: remove user_ctx for multiple process support */
- struct hl_ctx *user_ctx;
+ struct list_head fpriv_list;
+ struct mutex fpriv_list_lock;
+
+ struct hl_ctx *compute_ctx;
+
+ struct hl_device_idle_busy_ts *idle_busy_ts_arr;
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
atomic_t in_reset;
- atomic_t curr_pll_profile;
- atomic_t fd_open_cnt;
- atomic_t cs_active_cnt;
+ enum hl_pll_frequency curr_pll_profile;
+ int cs_active_cnt;
u32 major;
u32 high_pll;
u32 soft_reset_cnt;
u32 hard_reset_cnt;
+ u32 idle_busy_ts_idx;
u16 id;
+ u16 id_control;
u8 disabled;
u8 late_init_done;
u8 hwmon_initialized;
@@ -1293,6 +1328,7 @@ struct hl_device {
u8 device_cpu_disabled;
u8 dma_mask;
u8 in_debug;
+ u8 cdev_sysfs_created;
/* Parameters for bring-up */
u8 mmu_enable;
@@ -1386,6 +1422,7 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
}
int hl_device_open(struct inode *inode, struct file *filp);
+int hl_device_open_ctrl(struct inode *inode, struct file *filp);
bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
enum hl_device_status hl_device_status(struct hl_device *hdev);
int hl_device_set_debug_mode(struct hl_device *hdev, bool enable);
@@ -1439,6 +1476,7 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
void hl_hpriv_get(struct hl_fpriv *hpriv);
void hl_hpriv_put(struct hl_fpriv *hpriv);
int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
+uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms);
int hl_build_hwmon_channel_info(struct hl_device *hdev,
struct armcp_sensor *sensors_arr);
@@ -1625,6 +1663,7 @@ static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
/* IOCTLs */
long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg);
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data);
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
index 6f6dbe93f1df..8c342fb499ca 100644
--- a/drivers/misc/habanalabs/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/habanalabs_drv.c
@@ -95,80 +95,127 @@ int hl_device_open(struct inode *inode, struct file *filp)
return -ENXIO;
}
- mutex_lock(&hdev->fd_open_cnt_lock);
+ hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+
+ hpriv->hdev = hdev;
+ filp->private_data = hpriv;
+ hpriv->filp = filp;
+ mutex_init(&hpriv->restore_phase_mutex);
+ kref_init(&hpriv->refcount);
+ nonseekable_open(inode, filp);
+
+ hl_cb_mgr_init(&hpriv->cb_mgr);
+ hl_ctx_mgr_init(&hpriv->ctx_mgr);
+
+ hpriv->taskpid = find_get_pid(current->pid);
+
+ mutex_lock(&hdev->fpriv_list_lock);
if (hl_device_disabled_or_in_reset(hdev)) {
dev_err_ratelimited(hdev->dev,
"Can't open %s because it is disabled or in reset\n",
dev_name(hdev->dev));
- mutex_unlock(&hdev->fd_open_cnt_lock);
- return -EPERM;
+ rc = -EPERM;
+ goto out_err;
}
if (hdev->in_debug) {
dev_err_ratelimited(hdev->dev,
"Can't open %s because it is being debugged by another user\n",
dev_name(hdev->dev));
- mutex_unlock(&hdev->fd_open_cnt_lock);
- return -EPERM;
+ rc = -EPERM;
+ goto out_err;
}
- if (atomic_read(&hdev->fd_open_cnt)) {
- dev_info_ratelimited(hdev->dev,
+ if (hdev->compute_ctx) {
+ dev_dbg_ratelimited(hdev->dev,
"Can't open %s because another user is working on it\n",
dev_name(hdev->dev));
- mutex_unlock(&hdev->fd_open_cnt_lock);
- return -EBUSY;
- }
-
- atomic_inc(&hdev->fd_open_cnt);
-
- mutex_unlock(&hdev->fd_open_cnt_lock);
-
- hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
- if (!hpriv) {
- rc = -ENOMEM;
- goto close_device;
+ rc = -EBUSY;
+ goto out_err;
}
- hpriv->hdev = hdev;
- filp->private_data = hpriv;
- hpriv->filp = filp;
- mutex_init(&hpriv->restore_phase_mutex);
- kref_init(&hpriv->refcount);
- nonseekable_open(inode, filp);
-
- hl_cb_mgr_init(&hpriv->cb_mgr);
- hl_ctx_mgr_init(&hpriv->ctx_mgr);
-
rc = hl_ctx_create(hdev, hpriv);
if (rc) {
- dev_err(hdev->dev, "Failed to open FD (CTX fail)\n");
+ dev_err(hdev->dev, "Failed to create context %d\n", rc);
goto out_err;
}
- hpriv->taskpid = find_get_pid(current->pid);
-
- /*
- * Device is IDLE at this point so it is legal to change PLLs. There
- * is no need to check anything because if the PLL is already HIGH, the
- * set function will return without doing anything
+ /* Device is IDLE at this point so it is legal to change PLLs.
+ * There is no need to check anything because if the PLL is
+ * already HIGH, the set function will return without doing
+ * anything
*/
hl_device_set_frequency(hdev, PLL_HIGH);
+ list_add(&hpriv->dev_node, &hdev->fpriv_list);
+ mutex_unlock(&hdev->fpriv_list_lock);
+
hl_debugfs_add_file(hpriv);
return 0;
out_err:
- filp->private_data = NULL;
- hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
+ mutex_unlock(&hdev->fpriv_list_lock);
+
hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
+ hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
+ filp->private_data = NULL;
mutex_destroy(&hpriv->restore_phase_mutex);
+ put_pid(hpriv->taskpid);
+
kfree(hpriv);
+ return rc;
+}
+
+int hl_device_open_ctrl(struct inode *inode, struct file *filp)
+{
+ struct hl_device *hdev;
+ struct hl_fpriv *hpriv;
+ int rc;
+
+ mutex_lock(&hl_devs_idr_lock);
+ hdev = idr_find(&hl_devs_idr, iminor(inode));
+ mutex_unlock(&hl_devs_idr_lock);
+
+ if (!hdev) {
+ pr_err("Couldn't find device %d:%d\n",
+ imajor(inode), iminor(inode));
+ return -ENXIO;
+ }
+
+ hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+
+ mutex_lock(&hdev->fpriv_list_lock);
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ dev_err_ratelimited(hdev->dev_ctrl,
+ "Can't open %s because it is disabled or in reset\n",
+ dev_name(hdev->dev_ctrl));
+ rc = -EPERM;
+ goto out_err;
+ }
-close_device:
- atomic_dec(&hdev->fd_open_cnt);
+ list_add(&hpriv->dev_node, &hdev->fpriv_list);
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ hpriv->hdev = hdev;
+ filp->private_data = hpriv;
+ hpriv->filp = filp;
+ hpriv->is_control = true;
+ nonseekable_open(inode, filp);
+
+ hpriv->taskpid = find_get_pid(current->pid);
+
+ return 0;
+
+out_err:
+ mutex_unlock(&hdev->fpriv_list_lock);
+ kfree(hpriv);
return rc;
}
@@ -199,7 +246,7 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
enum hl_asic_type asic_type, int minor)
{
struct hl_device *hdev;
- int rc;
+ int rc, main_id, ctrl_id = 0;
*dev = NULL;
@@ -240,33 +287,34 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
mutex_lock(&hl_devs_idr_lock);
- if (minor == -1) {
- rc = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS,
+ /* Always save 2 numbers, 1 for main device and 1 for control.
+ * They must be consecutive
+ */
+ main_id = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS,
GFP_KERNEL);
- } else {
- void *old_idr = idr_replace(&hl_devs_idr, hdev, minor);
- if (IS_ERR_VALUE(old_idr)) {
- rc = PTR_ERR(old_idr);
- pr_err("Error %d when trying to replace minor %d\n",
- rc, minor);
- mutex_unlock(&hl_devs_idr_lock);
- goto free_hdev;
- }
- rc = minor;
- }
+ if (main_id >= 0)
+ ctrl_id = idr_alloc(&hl_devs_idr, hdev, main_id + 1,
+ main_id + 2, GFP_KERNEL);
mutex_unlock(&hl_devs_idr_lock);
- if (rc < 0) {
- if (rc == -ENOSPC) {
+ if ((main_id < 0) || (ctrl_id < 0)) {
+ if ((main_id == -ENOSPC) || (ctrl_id == -ENOSPC))
pr_err("too many devices in the system\n");
- rc = -EBUSY;
+
+ if (main_id >= 0) {
+ mutex_lock(&hl_devs_idr_lock);
+ idr_remove(&hl_devs_idr, main_id);
+ mutex_unlock(&hl_devs_idr_lock);
}
+
+ rc = -EBUSY;
goto free_hdev;
}
- hdev->id = rc;
+ hdev->id = main_id;
+ hdev->id_control = ctrl_id;
*dev = hdev;
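
idr_alloc() hands out the lowest free ID in the half-open range [start, end), so the second call above can only ever return main_id + 1; anything else fails and the pair is rolled back. A minimal sketch of the two-consecutive-IDs pattern (an illustrative helper, not part of this driver):

static int alloc_consecutive_pair(struct idr *idr, void *ptr, int *second)
{
	/* end == 0 means "no upper limit" for idr_alloc() */
	int first = idr_alloc(idr, ptr, 0, 0, GFP_KERNEL);

	if (first < 0)
		return first;

	/* [first + 1, first + 2) can only yield first + 1 */
	*second = idr_alloc(idr, ptr, first + 1, first + 2, GFP_KERNEL);
	if (*second < 0) {
		idr_remove(idr, first);
		return *second;
	}
	return first;
}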
@@ -288,6 +336,7 @@ void destroy_hdev(struct hl_device *hdev)
/* Remove device from the device list */
mutex_lock(&hl_devs_idr_lock);
idr_remove(&hl_devs_idr, hdev->id);
+ idr_remove(&hl_devs_idr, hdev->id_control);
mutex_unlock(&hl_devs_idr_lock);
kfree(hdev);
@@ -295,8 +344,7 @@ void destroy_hdev(struct hl_device *hdev)
static int hl_pmops_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct hl_device *hdev = pci_get_drvdata(pdev);
+ struct hl_device *hdev = dev_get_drvdata(dev);
pr_debug("Going to suspend PCI device\n");
@@ -310,8 +358,7 @@ static int hl_pmops_suspend(struct device *dev)
static int hl_pmops_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct hl_device *hdev = pci_get_drvdata(pdev);
+ struct hl_device *hdev = dev_get_drvdata(dev);
pr_debug("Going to resume PCI device\n");
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
index 07127576b3e8..66d9c710073c 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -65,7 +65,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.num_of_events = prop->num_of_events;
memcpy(hw_ip.armcp_version,
prop->armcp_info.armcp_version, VERSION_MAX_LEN);
- hw_ip.armcp_cpld_version = __le32_to_cpu(prop->armcp_info.cpld_version);
+ hw_ip.armcp_cpld_version = le32_to_cpu(prop->armcp_info.cpld_version);
hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
@@ -75,7 +75,8 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0;
}
-static int hw_events_info(struct hl_device *hdev, struct hl_info_args *args)
+static int hw_events_info(struct hl_device *hdev, bool aggregate,
+ struct hl_info_args *args)
{
u32 size, max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
@@ -84,13 +85,14 @@ static int hw_events_info(struct hl_device *hdev, struct hl_info_args *args)
if ((!max_size) || (!out))
return -EINVAL;
- arr = hdev->asic_funcs->get_events_stat(hdev, &size);
+ arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}
-static int dram_usage_info(struct hl_device *hdev, struct hl_info_args *args)
+static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
+ struct hl_device *hdev = hpriv->hdev;
struct hl_info_dram_usage dram_usage = {0};
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
@@ -104,7 +106,9 @@ static int dram_usage_info(struct hl_device *hdev, struct hl_info_args *args)
prop->dram_base_address);
dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
atomic64_read(&hdev->dram_used_mem);
- dram_usage.ctx_dram_mem = atomic64_read(&hdev->user_ctx->dram_phys_mem);
+ if (hpriv->ctx)
+ dram_usage.ctx_dram_mem =
+ atomic64_read(&hpriv->ctx->dram_phys_mem);
return copy_to_user(out, &dram_usage,
min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
@@ -141,13 +145,16 @@ static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
params->op = args->op;
if (args->input_ptr && args->input_size) {
- input = memdup_user(u64_to_user_ptr(args->input_ptr),
- args->input_size);
- if (IS_ERR(input)) {
- rc = PTR_ERR(input);
- input = NULL;
- dev_err(hdev->dev,
- "error %d when copying input debug data\n", rc);
+ input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
+ if (!input) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
+ args->input_size)) {
+ rc = -EFAULT;
+ dev_err(hdev->dev, "failed to copy input debug data\n");
goto out;
}
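
The rewritten input path no longer sizes the kernel buffer from the user-controlled args->input_size; the allocation is fixed by the opcode's own structure size (hl_debug_struct_size[args->op]), which is the hardening this series advertises for the Debug IOCTL. A generic sketch of that pattern, with hypothetical names (the bound check on the user-supplied size is assumed to happen before or alongside the copy):

/* Sketch: allocate the kernel's own notion of the structure size,
 * then bound the user copy by it, so userspace cannot shrink the
 * allocation or overrun it with a mismatched size.
 */
static void *copy_op_input(const void __user *uptr, u32 usize, size_t op_size)
{
	void *buf;

	if (usize > op_size)		/* reject oversized input */
		return ERR_PTR(-EINVAL);

	buf = kzalloc(op_size, GFP_KERNEL); /* zero-fill any unwritten tail */
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(buf, uptr, usize)) {
		kfree(buf);
		return ERR_PTR(-EFAULT);
	}
	return buf;
}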
@@ -191,42 +198,81 @@ out:
return rc;
}
-static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
+static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_device_utilization device_util = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ if ((args->period_ms < 100) || (args->period_ms > 1000) ||
+ (args->period_ms % 100)) {
+ dev_err(hdev->dev,
+ "period %u must be between 100 - 1000 and must be divisible by 100\n",
+ args->period_ms);
+ return -EINVAL;
+ }
+
+ device_util.utilization = hl_device_utilization(hdev, args->period_ms);
+
+ return copy_to_user(out, &device_util,
+ min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
+}
+
+static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
+ struct device *dev)
{
struct hl_info_args *args = data;
struct hl_device *hdev = hpriv->hdev;
int rc;
- /* We want to return device status even if it disabled or in reset */
- if (args->op == HL_INFO_DEVICE_STATUS)
+ /*
+ * Information is returned for the following opcodes even if the device
+ * is disabled or in reset.
+ */
+ switch (args->op) {
+ case HL_INFO_HW_IP_INFO:
+ return hw_ip_info(hdev, args);
+
+ case HL_INFO_DEVICE_STATUS:
return device_status_info(hdev, args);
+ default:
+ break;
+ }
+
if (hl_device_disabled_or_in_reset(hdev)) {
- dev_warn_ratelimited(hdev->dev,
+ dev_warn_ratelimited(dev,
"Device is %s. Can't execute INFO IOCTL\n",
atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
return -EBUSY;
}
switch (args->op) {
- case HL_INFO_HW_IP_INFO:
- rc = hw_ip_info(hdev, args);
- break;
-
case HL_INFO_HW_EVENTS:
- rc = hw_events_info(hdev, args);
+ rc = hw_events_info(hdev, false, args);
break;
case HL_INFO_DRAM_USAGE:
- rc = dram_usage_info(hdev, args);
+ rc = dram_usage_info(hpriv, args);
break;
case HL_INFO_HW_IDLE:
rc = hw_idle(hdev, args);
break;
+ case HL_INFO_DEVICE_UTILIZATION:
+ rc = device_utilization(hdev, args);
+ break;
+
+ case HL_INFO_HW_EVENTS_AGGREGATE:
+ rc = hw_events_info(hdev, true, args);
+ break;
+
default:
- dev_err(hdev->dev, "Invalid request %d\n", args->op);
+ dev_err(dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
break;
}
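
For reference, the new HL_INFO_DEVICE_UTILIZATION opcode handled above can be exercised from userspace roughly as below, given an already-opened device fd; the uapi field names are taken from this series and should be treated as illustrative rather than authoritative:

struct hl_info_device_utilization util = {0};
struct hl_info_args args = {0};

args.op = HL_INFO_DEVICE_UTILIZATION;
args.period_ms = 500;	/* must be 100..1000 and a multiple of 100 */
args.return_pointer = (__u64)(uintptr_t)&util;
args.return_size = sizeof(util);

if (ioctl(fd, HL_IOCTL_INFO, &args) == 0)
	printf("utilization over last 500 ms: %u\n", util.utilization);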
@@ -234,6 +280,16 @@ static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
return rc;
}
+static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
+}
+
+static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
+{
+ return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
+}
+
static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
{
struct hl_debug_args *args = data;
@@ -288,52 +344,45 @@ static const struct hl_ioctl_desc hl_ioctls[] = {
HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};
-#define HL_CORE_IOCTL_COUNT ARRAY_SIZE(hl_ioctls)
+static const struct hl_ioctl_desc hl_ioctls_control[] = {
+ HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
+};
-long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
+ const struct hl_ioctl_desc *ioctl, struct device *dev)
{
struct hl_fpriv *hpriv = filep->private_data;
struct hl_device *hdev = hpriv->hdev;
- hl_ioctl_t *func;
- const struct hl_ioctl_desc *ioctl = NULL;
unsigned int nr = _IOC_NR(cmd);
char stack_kdata[128] = {0};
char *kdata = NULL;
unsigned int usize, asize;
+ hl_ioctl_t *func;
+ u32 hl_size;
int retcode;
if (hdev->hard_reset_pending) {
- dev_crit_ratelimited(hdev->dev,
+ dev_crit_ratelimited(hdev->dev_ctrl,
"Device HARD reset pending! Please close FD\n");
return -ENODEV;
}
- if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
- u32 hl_size;
-
- ioctl = &hl_ioctls[nr];
-
- hl_size = _IOC_SIZE(ioctl->cmd);
- usize = asize = _IOC_SIZE(cmd);
- if (hl_size > asize)
- asize = hl_size;
-
- cmd = ioctl->cmd;
- } else {
- dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
- task_pid_nr(current), nr);
- return -ENOTTY;
- }
-
/* Do not trust userspace, use our own definition */
func = ioctl->func;
if (unlikely(!func)) {
- dev_dbg(hdev->dev, "no function\n");
+ dev_dbg(dev, "no function\n");
retcode = -ENOTTY;
goto out_err;
}
+ hl_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
+ if (hl_size > asize)
+ asize = hl_size;
+
+ cmd = ioctl->cmd;
+
if (cmd & (IOC_IN | IOC_OUT)) {
if (asize <= sizeof(stack_kdata)) {
kdata = stack_kdata;
@@ -363,8 +412,7 @@ long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
out_err:
if (retcode)
- dev_dbg(hdev->dev,
- "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
+ dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
task_pid_nr(current), cmd, nr);
if (kdata != stack_kdata)
@@ -372,3 +420,39 @@ out_err:
return retcode;
}
+
+long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct hl_fpriv *hpriv = filep->private_data;
+ struct hl_device *hdev = hpriv->hdev;
+ const struct hl_ioctl_desc *ioctl = NULL;
+ unsigned int nr = _IOC_NR(cmd);
+
+ if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
+ ioctl = &hl_ioctls[nr];
+ } else {
+ dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
+ task_pid_nr(current), nr);
+ return -ENOTTY;
+ }
+
+ return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
+}
+
+long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct hl_fpriv *hpriv = filep->private_data;
+ struct hl_device *hdev = hpriv->hdev;
+ const struct hl_ioctl_desc *ioctl = NULL;
+ unsigned int nr = _IOC_NR(cmd);
+
+ if (nr == _IOC_NR(HL_IOCTL_INFO)) {
+ ioctl = &hl_ioctls_control[nr];
+ } else {
+ dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
+ task_pid_nr(current), nr);
+ return -ENOTTY;
+ }
+
+ return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
+}
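
hl_ioctl_control() deliberately dispatches through a one-entry table, so the control node exposes nothing but HL_IOCTL_INFO. A monitoring tool can therefore poll device state without occupying the single compute slot. A hypothetical snippet (the control node name here is an assumption based on this series, not a documented path):

struct hl_info_device_status status = {0};
struct hl_info_args args = {0};
int fd = open("/dev/hl_controlD0", O_RDWR);	/* assumed node name */

args.op = HL_INFO_DEVICE_STATUS;
args.return_pointer = (__u64)(uintptr_t)&status;
args.return_size = sizeof(status);
ioctl(fd, HL_IOCTL_INFO, &args);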
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 5f5673b74985..55b383b2a116 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -80,9 +80,9 @@ static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
bd = (struct hl_bd *) (uintptr_t) q->kernel_address;
bd += hl_pi_2_offset(q->pi);
- bd->ctl = __cpu_to_le32(ctl);
- bd->len = __cpu_to_le32(len);
- bd->ptr = __cpu_to_le64(ptr);
+ bd->ctl = cpu_to_le32(ctl);
+ bd->len = cpu_to_le32(len);
+ bd->ptr = cpu_to_le64(ptr);
q->pi = hl_queue_inc_ptr(q->pi);
hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
@@ -249,7 +249,7 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
len = job->job_cb_size;
ptr = cb->bus_address;
- cq_pkt.data = __cpu_to_le32(
+ cq_pkt.data = cpu_to_le32(
((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
& CQ_ENTRY_SHADOW_INDEX_MASK) |
(1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) |
@@ -267,7 +267,7 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
cq_addr,
- __le32_to_cpu(cq_pkt.data),
+ le32_to_cpu(cq_pkt.data),
q->hw_queue_id);
q->shadow_queue[hl_pi_2_offset(q->pi)] = job;
@@ -364,7 +364,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
spin_unlock(&hdev->hw_queues_mirror_lock);
}
- atomic_inc(&hdev->cs_active_cnt);
+ if (!hdev->cs_active_cnt++) {
+ struct hl_device_idle_busy_ts *ts;
+
+ ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx];
+ ts->busy_to_idle_ts = ktime_set(0, 0);
+ ts->idle_to_busy_ts = ktime_get();
+ }
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
if (job->ext_queue)
diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c
index 77facd25c4a2..7be4bace9b4f 100644
--- a/drivers/misc/habanalabs/hwmon.c
+++ b/drivers/misc/habanalabs/hwmon.c
@@ -26,7 +26,7 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
int rc, i, j;
for (i = 0 ; i < ARMCP_MAX_SENSORS ; i++) {
- type = __le32_to_cpu(sensors_arr[i].type);
+ type = le32_to_cpu(sensors_arr[i].type);
if ((type == 0) && (sensors_arr[i].flags == 0))
break;
@@ -58,10 +58,10 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
}
for (i = 0 ; i < arr_size ; i++) {
- type = __le32_to_cpu(sensors_arr[i].type);
+ type = le32_to_cpu(sensors_arr[i].type);
curr_arr = sensors_by_type[type];
curr_arr[sensors_by_type_next_index[type]++] =
- __le32_to_cpu(sensors_arr[i].flags);
+ le32_to_cpu(sensors_arr[i].flags);
}
channels_info = kcalloc(num_active_sensor_types + 1,
@@ -273,7 +273,7 @@ long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -299,7 +299,7 @@ long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_VOLTAGE_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_VOLTAGE_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -325,7 +325,7 @@ long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_CURRENT_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_CURRENT_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -351,7 +351,7 @@ long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FAN_SPEED_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_FAN_SPEED_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -377,7 +377,7 @@ long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_PWM_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_PWM_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -403,11 +403,11 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_PWM_SET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_PWM_SET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
- pkt.value = __cpu_to_le64(value);
+ pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, NULL);
@@ -421,6 +421,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
int hl_hwmon_init(struct hl_device *hdev)
{
struct device *dev = hdev->pdev ? &hdev->pdev->dev : hdev->dev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;
if ((hdev->hwmon_initialized) || !(hdev->fw_loading))
@@ -430,7 +431,8 @@ int hl_hwmon_init(struct hl_device *hdev)
hdev->hl_chip_info->ops = &hl_hwmon_ops;
hdev->hwmon_dev = hwmon_device_register_with_info(dev,
- "habanalabs", hdev, hdev->hl_chip_info, NULL);
+ prop->armcp_info.card_name, hdev,
+ hdev->hl_chip_info, NULL);
if (IS_ERR(hdev->hwmon_dev)) {
rc = PTR_ERR(hdev->hwmon_dev);
dev_err(hdev->dev,
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h
index 1f1e35e86d84..e4c6699a1868 100644
--- a/drivers/misc/habanalabs/include/armcp_if.h
+++ b/drivers/misc/habanalabs/include/armcp_if.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2016-2018 HabanaLabs, Ltd.
+ * Copyright 2016-2019 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -41,33 +41,34 @@ enum pq_init_status {
/*
* ArmCP Primary Queue Packets
*
- * During normal operation, KMD needs to send various messages to ArmCP,
- * usually either to SET some value into a H/W periphery or to GET the current
- * value of some H/W periphery. For example, SET the frequency of MME/TPC and
- * GET the value of the thermal sensor.
- *
- * These messages can be initiated either by the User application or by KMD
- * itself, e.g. power management code. In either case, the communication from
- * KMD to ArmCP will *always* be in synchronous mode, meaning that KMD will
- * send a single message and poll until the message was acknowledged and the
- * results are ready (if results are needed).
- *
- * This means that only a single message can be sent at a time and KMD must
- * wait for its result before sending the next message. Having said that,
- * because these are control messages which are sent in a relatively low
+ * During normal operation, the host's kernel driver needs to send various
+ * messages to ArmCP, usually either to SET some value into a H/W periphery or
+ * to GET the current value of some H/W periphery. For example, SET the
+ * frequency of MME/TPC and GET the value of the thermal sensor.
+ *
+ * These messages can be initiated either by the User application or by the
+ * host's driver itself, e.g. power management code. In either case, the
+ * communication from the host's driver to ArmCP will *always* be in
+ * synchronous mode, meaning that the host will send a single message and poll
+ * until the message was acknowledged and the results are ready (if results are
+ * needed).
+ *
+ * This means that only a single message can be sent at a time and the host's
+ * driver must wait for its result before sending the next message. Having said
+ * that, because these are control messages which are sent in a relatively low
* frequency, this limitation seems acceptable. It's important to note that
* in case of multiple devices, messages to different devices *can* be sent
* at the same time.
*
* The message, inputs/outputs (if relevant) and fence object will be located
- * on the device DDR at an address that will be determined by KMD. During
- * device initialization phase, KMD will pass to ArmCP that address. Most of
- * the message types will contain inputs/outputs inside the message itself.
- * The common part of each message will contain the opcode of the message (its
- * type) and a field representing a fence object.
- *
- * When KMD wishes to send a message to ArmCP, it will write the message
- * contents to the device DDR, clear the fence object and then write the
+ * on the device DDR at an address that will be determined by the host's driver.
+ * During device initialization phase, the host will pass to ArmCP that address.
+ * Most of the message types will contain inputs/outputs inside the message
+ * itself. The common part of each message will contain the opcode of the
+ * message (its type) and a field representing a fence object.
+ *
+ * When the host's driver wishes to send a message to ArmCP, it will write the
+ * message contents to the device DDR, clear the fence object and then write the
* value 484 to the mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR register to issue
* the 484 interrupt-id to the ARM core.
*
@@ -78,12 +79,13 @@ enum pq_init_status {
* device DDR and then write to the fence object. If an error occurred, ArmCP
* will fill the rc field with the right error code.
*
- * In the meantime, KMD will poll on the fence object. Once KMD sees that the
- * fence object is signaled, it will read the results from the device DDR
- * (if relevant) and resume the code execution in KMD.
+ * In the meantime, the host's driver will poll on the fence object. Once the
+ * host sees that the fence object is signaled, it will read the results from
+ * the device DDR (if relevant) and resume the code execution in the host's
+ * driver.
*
* To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8
- * so the value being put by the KMD matches the value read by ArmCP
+ * so the value being put by the host's driver matches the value read by ArmCP
*
* Non-QMAN packets should be limited to values 1 through (2^8 - 1)
*
@@ -148,9 +150,9 @@ enum pq_init_status {
*
* ARMCP_PACKET_INFO_GET -
* Fetch information from the device as specified in the packet's
- * structure. KMD passes the max size it allows the ArmCP to write to
- * the structure, to prevent data corruption in case of mismatched
- * KMD/FW versions.
+ * structure. The host's driver passes the max size it allows the ArmCP to
+ * write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
*
* ARMCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed
*
@@ -183,9 +185,9 @@ enum pq_init_status {
* ARMCP_PACKET_EEPROM_DATA_GET -
* Get EEPROM data from the ArmCP kernel. The buffer is specified in the
* addr field. The CPU will put the returned data size in the result
- * field. In addition, KMD passes the max size it allows the ArmCP to
- * write to the structure, to prevent data corruption in case of
- * mismatched KMD/FW versions.
+ * field. In addition, the host's driver passes the max size it allows the
+ * ArmCP to write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
*
*/
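
A schematic of the synchronous send-and-poll flow described above (names, timeout, and buffer layout are illustrative; the driver's real implementation lives behind the ASIC's send_cpu_message() callback):

static int armcp_send_sketch(void __iomem *msg_area,
			     void __iomem *gicd_setspi,
			     const struct armcp_packet *pkt, size_t len)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	memcpy_toio(msg_area, pkt, len);	/* message body into device DDR */
	writel(0, msg_area + offsetof(struct armcp_packet, fence));
	writel(484, gicd_setspi);		/* raise interrupt-id 484 */

	/* ArmCP writes a non-zero fence once the results are in place */
	while (!readl(msg_area + offsetof(struct armcp_packet, fence))) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(100, 200);
	}
	return 0;
}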
@@ -231,7 +233,7 @@ struct armcp_packet {
__le32 ctl;
- __le32 fence; /* Signal to KMD that message is completed */
+ __le32 fence; /* Signal to host that message is completed */
union {
struct {/* For temperature/current/voltage/fan/pwm get/set */
@@ -310,6 +312,7 @@ struct eq_generic_event {
* ArmCP info
*/
+#define CARD_NAME_MAX_LEN 16
#define VERSION_MAX_LEN 128
#define ARMCP_MAX_SENSORS 128
@@ -318,6 +321,19 @@ struct armcp_sensor {
__le32 flags;
};
+/**
+ * struct armcp_info - Info from ArmCP that is necessary to the host's driver
+ * @sensors: available sensors description.
+ * @kernel_version: ArmCP linux kernel version.
+ * @reserved: reserved field.
+ * @cpld_version: CPLD programmed F/W version.
+ * @infineon_version: Infineon main DC-DC version.
+ * @fuse_version: silicon production FUSE information.
+ * @thermal_version: thermald S/W version.
+ * @armcp_version: ArmCP S/W version.
+ * @dram_size: available DRAM size.
+ * @card_name: card name that will be displayed in the HWMON subsystem on the
+ * host
+ */
struct armcp_info {
struct armcp_sensor sensors[ARMCP_MAX_SENSORS];
__u8 kernel_version[VERSION_MAX_LEN];
@@ -328,6 +344,7 @@ struct armcp_info {
__u8 thermal_version[VERSION_MAX_LEN];
__u8 armcp_version[VERSION_MAX_LEN];
__le64 dram_size;
+ char card_name[CARD_NAME_MAX_LEN];
};
#endif /* ARMCP_IF_H */
diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/misc/habanalabs/include/goya/goya.h
index 3f02a52ba4ce..43d241891e45 100644
--- a/drivers/misc/habanalabs/include/goya/goya.h
+++ b/drivers/misc/habanalabs/include/goya/goya.h
@@ -38,4 +38,6 @@
#define TPC_MAX_NUM 8
+#define MME_MAX_NUM 1
+
#endif /* GOYA_H */
diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
new file mode 100644
index 000000000000..cd89723c7f61
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GOYA_REG_MAP_H_
+#define GOYA_REG_MAP_H_
+
+/*
+ * PSOC scratch-pad registers
+ */
+#define mmCPU_PQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
+#define mmCPU_PQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
+#define mmCPU_EQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
+#define mmCPU_EQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
+#define mmCPU_EQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_4
+#define mmCPU_PQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_5
+#define mmCPU_EQ_CI mmPSOC_GLOBAL_CONF_SCRATCHPAD_6
+#define mmCPU_PQ_INIT_STATUS mmPSOC_GLOBAL_CONF_SCRATCHPAD_7
+#define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
+#define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9
+#define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
+#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
+#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
+#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
+#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
+#define mmUBOOT_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
+#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
+
+#define mmHW_STATE mmPSOC_GLOBAL_CONF_APP_STATUS
+
+#endif /* GOYA_REG_MAP_H_ */
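
The aliases let call sites name what a scratch-pad actually holds instead of a bare index. For illustration (RREG32()/WREG32() being the driver's existing MMIO accessor macros):

u32 btl_id = RREG32(mmBTL_ID);	/* was RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_31) */

WREG32(mmCPU_EQ_CI, eq_ci);	/* event-queue consumer index update */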
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c
index 199791b57caf..fac65fbd70e8 100644
--- a/drivers/misc/habanalabs/irq.c
+++ b/drivers/misc/habanalabs/irq.c
@@ -160,7 +160,7 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
while (1) {
bool entry_ready =
- ((__le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
+ ((le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT);
if (!entry_ready)
@@ -194,7 +194,7 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
skip_irq:
/* Clear EQ entry ready bit */
eq_entry->hdr.ctl =
- __cpu_to_le32(__le32_to_cpu(eq_entry->hdr.ctl) &
+ cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) &
~EQ_CTL_READY_MASK);
eq->ci = hl_eq_inc_ptr(eq->ci);
diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c
index 25eb46d29d88..4cd622b017b9 100644
--- a/drivers/misc/habanalabs/sysfs.c
+++ b/drivers/misc/habanalabs/sysfs.c
@@ -21,12 +21,12 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
memset(&pkt, 0, sizeof(pkt));
if (curr)
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_CURR_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_CURR_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
else
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
- pkt.pll_index = __cpu_to_le32(pll_index);
+ pkt.pll_index = cpu_to_le32(pll_index);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_CLK_PKT_TIMEOUT, &result);
@@ -48,10 +48,10 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_SET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_SET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
- pkt.pll_index = __cpu_to_le32(pll_index);
- pkt.value = __cpu_to_le64(freq);
+ pkt.pll_index = cpu_to_le32(pll_index);
+ pkt.value = cpu_to_le64(freq);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_CLK_PKT_TIMEOUT, NULL);
@@ -70,7 +70,7 @@ u64 hl_get_max_power(struct hl_device *hdev)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_MAX_POWER_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_MAX_POWER_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -91,9 +91,9 @@ void hl_set_max_power(struct hl_device *hdev, u64 value)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
- pkt.value = __cpu_to_le64(value);
+ pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_PWR_PKT_TIMEOUT, NULL);
@@ -102,100 +102,6 @@ void hl_set_max_power(struct hl_device *hdev, u64 value)
dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
}
-static ssize_t pm_mng_profile_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hl_device *hdev = dev_get_drvdata(dev);
-
- if (hl_device_disabled_or_in_reset(hdev))
- return -ENODEV;
-
- return sprintf(buf, "%s\n",
- (hdev->pm_mng_profile == PM_AUTO) ? "auto" :
- (hdev->pm_mng_profile == PM_MANUAL) ? "manual" :
- "unknown");
-}
-
-static ssize_t pm_mng_profile_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct hl_device *hdev = dev_get_drvdata(dev);
-
- if (hl_device_disabled_or_in_reset(hdev)) {
- count = -ENODEV;
- goto out;
- }
-
- mutex_lock(&hdev->fd_open_cnt_lock);
-
- if (atomic_read(&hdev->fd_open_cnt) > 0) {
- dev_err(hdev->dev,
- "Can't change PM profile while user process is opened on the device\n");
- count = -EPERM;
- goto unlock_mutex;
- }
-
- if (strncmp("auto", buf, strlen("auto")) == 0) {
- /* Make sure we are in LOW PLL when changing modes */
- if (hdev->pm_mng_profile == PM_MANUAL) {
- atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
- hl_device_set_frequency(hdev, PLL_LOW);
- hdev->pm_mng_profile = PM_AUTO;
- }
- } else if (strncmp("manual", buf, strlen("manual")) == 0) {
- /* Make sure we are in LOW PLL when changing modes */
- if (hdev->pm_mng_profile == PM_AUTO) {
- flush_delayed_work(&hdev->work_freq);
- hdev->pm_mng_profile = PM_MANUAL;
- }
- } else {
- dev_err(hdev->dev, "value should be auto or manual\n");
- count = -EINVAL;
- goto unlock_mutex;
- }
-
-unlock_mutex:
- mutex_unlock(&hdev->fd_open_cnt_lock);
-out:
- return count;
-}
-
-static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct hl_device *hdev = dev_get_drvdata(dev);
-
- if (hl_device_disabled_or_in_reset(hdev))
- return -ENODEV;
-
- return sprintf(buf, "%u\n", hdev->high_pll);
-}
-
-static ssize_t high_pll_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct hl_device *hdev = dev_get_drvdata(dev);
- long value;
- int rc;
-
- if (hl_device_disabled_or_in_reset(hdev)) {
- count = -ENODEV;
- goto out;
- }
-
- rc = kstrtoul(buf, 0, &value);
-
- if (rc) {
- count = -EINVAL;
- goto out;
- }
-
- hdev->high_pll = value;
-
-out:
- return count;
-}
-
static ssize_t uboot_ver_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -351,14 +257,6 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%s\n", str);
}
-static ssize_t write_open_cnt_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hl_device *hdev = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n", hdev->user_ctx ? 1 : 0);
-}
-
static ssize_t soft_reset_cnt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -450,18 +348,15 @@ static DEVICE_ATTR_RO(device_type);
static DEVICE_ATTR_RO(fuse_ver);
static DEVICE_ATTR_WO(hard_reset);
static DEVICE_ATTR_RO(hard_reset_cnt);
-static DEVICE_ATTR_RW(high_pll);
static DEVICE_ATTR_RO(infineon_ver);
static DEVICE_ATTR_RW(max_power);
static DEVICE_ATTR_RO(pci_addr);
-static DEVICE_ATTR_RW(pm_mng_profile);
static DEVICE_ATTR_RO(preboot_btl_ver);
static DEVICE_ATTR_WO(soft_reset);
static DEVICE_ATTR_RO(soft_reset_cnt);
static DEVICE_ATTR_RO(status);
static DEVICE_ATTR_RO(thermal_ver);
static DEVICE_ATTR_RO(uboot_ver);
-static DEVICE_ATTR_RO(write_open_cnt);
static struct bin_attribute bin_attr_eeprom = {
.attr = {.name = "eeprom", .mode = (0444)},
@@ -477,18 +372,15 @@ static struct attribute *hl_dev_attrs[] = {
&dev_attr_fuse_ver.attr,
&dev_attr_hard_reset.attr,
&dev_attr_hard_reset_cnt.attr,
- &dev_attr_high_pll.attr,
&dev_attr_infineon_ver.attr,
&dev_attr_max_power.attr,
&dev_attr_pci_addr.attr,
- &dev_attr_pm_mng_profile.attr,
&dev_attr_preboot_btl_ver.attr,
&dev_attr_soft_reset.attr,
&dev_attr_soft_reset_cnt.attr,
&dev_attr_status.attr,
&dev_attr_thermal_ver.attr,
&dev_attr_uboot_ver.attr,
- &dev_attr_write_open_cnt.attr,
NULL,
};
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index fb10eafe9bde..c70b3822013f 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -9,6 +9,7 @@ lkdtm-$(CONFIG_LKDTM) += refcount.o
lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o
lkdtm-$(CONFIG_LKDTM) += usercopy.o
lkdtm-$(CONFIG_LKDTM) += stackleak.o
+lkdtm-$(CONFIG_LKDTM) += cfi.o
KASAN_SANITIZE_stackleak.o := n
KCOV_INSTRUMENT_rodata.o := n
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 24245ccdba72..7284a22b1a09 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -75,7 +75,12 @@ static int warn_counter;
void lkdtm_WARNING(void)
{
- WARN(1, "Warning message trigger count: %d\n", warn_counter++);
+ WARN_ON(++warn_counter);
+}
+
+void lkdtm_WARNING_MESSAGE(void)
+{
+ WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}
void lkdtm_EXCEPTION(void)
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
new file mode 100644
index 000000000000..e73ebdbfa806
--- /dev/null
+++ b/drivers/misc/lkdtm/cfi.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests relating directly to Control Flow Integrity.
+ */
+#include "lkdtm.h"
+
+static int called_count;
+
+/* Function taking one argument, without a return value. */
+static noinline void lkdtm_increment_void(int *counter)
+{
+ (*counter)++;
+}
+
+/* Function taking one argument, returning int. */
+static noinline int lkdtm_increment_int(int *counter)
+{
+ (*counter)++;
+
+ return *counter;
+}
+/*
+ * This tries to call an indirect function with a mismatched prototype.
+ */
+void lkdtm_CFI_FORWARD_PROTO(void)
+{
+ /*
+ * Matches lkdtm_increment_void()'s prototype, but not
+ * lkdtm_increment_int()'s prototype.
+ */
+ void (*func)(int *);
+
+ pr_info("Calling matched prototype ...\n");
+ func = lkdtm_increment_void;
+ func(&called_count);
+
+ pr_info("Calling mismatched prototype ...\n");
+ func = (void *)lkdtm_increment_int;
+ func(&called_count);
+
+ pr_info("Fail: survived mismatched prototype function call!\n");
+}
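
As with the other lkdtm tests, the new case is driven through lkdtm's debugfs interface; under a CFI-enforcing build (e.g. Clang's CFI) the mismatched call should trap before the final "Fail:" message is printed:

/* Userspace trigger (requires debugfs mounted and CONFIG_LKDTM=y/m) */
int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_WRONLY);

write(fd, "CFI_FORWARD_PROTO", sizeof("CFI_FORWARD_PROTO") - 1);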
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 66ae6b2a6950..cbc4c9045a99 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -104,6 +104,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(PANIC),
CRASHTYPE(BUG),
CRASHTYPE(WARNING),
+ CRASHTYPE(WARNING_MESSAGE),
CRASHTYPE(EXCEPTION),
CRASHTYPE(LOOP),
CRASHTYPE(EXHAUST_STACK),
@@ -169,6 +170,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(USERCOPY_KERNEL),
CRASHTYPE(USERCOPY_KERNEL_DS),
CRASHTYPE(STACKLEAK_ERASING),
+ CRASHTYPE(CFI_FORWARD_PROTO),
};
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 6a284a87a037..ab446e0bde97 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -11,6 +11,7 @@ void __init lkdtm_bugs_init(int *recur_param);
void lkdtm_PANIC(void);
void lkdtm_BUG(void);
void lkdtm_WARNING(void);
+void lkdtm_WARNING_MESSAGE(void);
void lkdtm_EXCEPTION(void);
void lkdtm_LOOP(void);
void lkdtm_EXHAUST_STACK(void);
@@ -95,4 +96,7 @@ void lkdtm_USERCOPY_KERNEL_DS(void);
/* lkdtm_stackleak.c */
void lkdtm_STACKLEAK_ERASING(void);
+/* cfi.c */
+void lkdtm_CFI_FORWARD_PROTO(void);
+
#endif
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 541538eff8b1..d5a92c6eadb3 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -383,12 +383,11 @@ static int mei_me_pci_resume(struct device *device)
#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
- dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");
+ dev_dbg(device, "rpm: me: runtime_idle\n");
- dev = pci_get_drvdata(pdev);
+ dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
if (mei_write_is_idle(dev))
@@ -399,13 +398,12 @@ static int mei_me_pm_runtime_idle(struct device *device)
static int mei_me_pm_runtime_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
int ret;
- dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");
+ dev_dbg(device, "rpm: me: runtime suspend\n");
- dev = pci_get_drvdata(pdev);
+ dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
@@ -418,7 +416,7 @@ static int mei_me_pm_runtime_suspend(struct device *device)
mutex_unlock(&dev->device_lock);
- dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);
+ dev_dbg(device, "rpm: me: runtime suspend ret=%d\n", ret);
if (ret && ret != -EAGAIN)
schedule_work(&dev->reset_work);
@@ -428,13 +426,12 @@ static int mei_me_pm_runtime_suspend(struct device *device)
static int mei_me_pm_runtime_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
int ret;
- dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");
+ dev_dbg(device, "rpm: me: runtime resume\n");
- dev = pci_get_drvdata(pdev);
+ dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
@@ -444,7 +441,7 @@ static int mei_me_pm_runtime_resume(struct device *device)
mutex_unlock(&dev->device_lock);
- dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);
+ dev_dbg(device, "rpm: me: runtime resume ret = %d\n", ret);
if (ret)
schedule_work(&dev->reset_work);
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 2e37fc2e0fa8..f1c16a587495 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -276,12 +276,11 @@ static int mei_txe_pci_resume(struct device *device)
#ifdef CONFIG_PM
static int mei_txe_pm_runtime_idle(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
- dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n");
+ dev_dbg(device, "rpm: txe: runtime_idle\n");
- dev = pci_get_drvdata(pdev);
+ dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
if (mei_write_is_idle(dev))
@@ -291,13 +290,12 @@ static int mei_txe_pm_runtime_idle(struct device *device)
}
static int mei_txe_pm_runtime_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
int ret;
- dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n");
+ dev_dbg(device, "rpm: txe: runtime suspend\n");
- dev = pci_get_drvdata(pdev);
+ dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
@@ -310,7 +308,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
/* keep irq on we are staying in D0 */
- dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
+ dev_dbg(device, "rpm: txe: runtime suspend ret=%d\n", ret);
mutex_unlock(&dev->device_lock);
@@ -322,13 +320,12 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
static int mei_txe_pm_runtime_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
int ret;
- dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n");
+ dev_dbg(device, "rpm: txe: runtime resume\n");
- dev = pci_get_drvdata(pdev);
+ dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
@@ -340,7 +337,7 @@ static int mei_txe_pm_runtime_resume(struct device *device)
mutex_unlock(&dev->device_lock);
- dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret);
+ dev_dbg(device, "rpm: txe: runtime resume ret = %d\n", ret);
if (ret)
schedule_work(&dev->reset_work);
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
index 266ffb6f6c44..c8bff2916d3d 100644
--- a/drivers/misc/mic/card/mic_x100.c
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -237,6 +237,9 @@ static int __init mic_probe(struct platform_device *pdev)
mdrv->dev = &pdev->dev;
snprintf(mdrv->name, sizeof(mic_driver_name), mic_driver_name);
+ /* FIXME: use dma_set_mask_and_coherent() and check result */
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+
mdev->mmio.pa = MIC_X100_MMIO_BASE;
mdev->mmio.len = MIC_X100_MMIO_LEN;
mdev->mmio.va = devm_ioremap(&pdev->dev, MIC_X100_MMIO_BASE,
@@ -282,18 +285,6 @@ static void mic_platform_shutdown(struct platform_device *pdev)
mic_remove(pdev);
}
-static u64 mic_dma_mask = DMA_BIT_MASK(64);
-
-static struct platform_device mic_platform_dev = {
- .name = mic_driver_name,
- .id = 0,
- .num_resources = 0,
- .dev = {
- .dma_mask = &mic_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
- },
-};
-
static struct platform_driver __refdata mic_platform_driver = {
.probe = mic_probe,
.remove = mic_remove,
@@ -303,6 +294,8 @@ static struct platform_driver __refdata mic_platform_driver = {
},
};
+static struct platform_device *mic_platform_dev;
+
static int __init mic_init(void)
{
int ret;
@@ -316,9 +309,12 @@ static int __init mic_init(void)
request_module("mic_x100_dma");
mic_init_card_debugfs();
- ret = platform_device_register(&mic_platform_dev);
+
+ mic_platform_dev = platform_device_register_simple(mic_driver_name,
+ 0, NULL, 0);
+ ret = PTR_ERR_OR_ZERO(mic_platform_dev);
if (ret) {
- pr_err("platform_device_register ret %d\n", ret);
+ pr_err("platform_device_register_full ret %d\n", ret);
goto cleanup_debugfs;
}
ret = platform_driver_register(&mic_platform_driver);
@@ -329,7 +325,7 @@ static int __init mic_init(void)
return ret;
device_unregister:
- platform_device_unregister(&mic_platform_dev);
+ platform_device_unregister(mic_platform_dev);
cleanup_debugfs:
mic_exit_card_debugfs();
done:
@@ -339,7 +335,7 @@ done:
static void __exit mic_exit(void)
{
platform_driver_unregister(&mic_platform_driver);
- platform_device_unregister(&mic_platform_dev);
+ platform_device_unregister(mic_platform_dev);
mic_exit_card_debugfs();
}
diff --git a/drivers/misc/mic/scif/scif_epd.h b/drivers/misc/mic/scif/scif_epd.h
index d3837f8a5ba0..0b9dfe1cc06c 100644
--- a/drivers/misc/mic/scif/scif_epd.h
+++ b/drivers/misc/mic/scif/scif_epd.h
@@ -156,9 +156,8 @@ static inline int scif_verify_epd(struct scif_endpt *ep)
static inline int scif_anon_inode_getfile(scif_epd_t epd)
{
epd->anon = anon_inode_getfile("scif", &scif_anon_fops, NULL, 0);
- if (IS_ERR(epd->anon))
- return PTR_ERR(epd->anon);
- return 0;
+
+ return PTR_ERR_OR_ZERO(epd->anon);
}
static inline void scif_anon_inode_fput(scif_epd_t epd)
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 09e24659ef3d..98c60f11b76b 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -696,7 +696,7 @@ again:
if (gru_mq_desc == NULL) {
gru_mq_desc = kmalloc(sizeof(struct
gru_message_queue_desc),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (gru_mq_desc == NULL) {
ret = xpNoMemory;
goto done;
@@ -1680,7 +1680,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}
-static struct xpc_arch_operations xpc_arch_ops_uv = {
+static const struct xpc_arch_operations xpc_arch_ops_uv = {
.setup_partitions = xpc_setup_partitions_uv,
.teardown_partitions = xpc_teardown_partitions_uv,
.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
deleted file mode 100644
index ee120dcbb3e6..000000000000
--- a/drivers/misc/spear13xx_pcie_gadget.c
+++ /dev/null
@@ -1,797 +0,0 @@
-/*
- * drivers/misc/spear13xx_pcie_gadget.c
- *
- * Copyright (C) 2010 ST Microelectronics
- * Pratyush Anand<pratyush.anand@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/device.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pci_regs.h>
-#include <linux/configfs.h>
-#include <mach/pcie.h>
-#include <mach/misc_regs.h>
-
-#define IN0_MEM_SIZE (200 * 1024 * 1024 - 1)
-/* In current implementation address translation is done using IN0 only.
- * So IN1 start address and IN0 end address have been kept the same
-*/
-#define IN1_MEM_SIZE (0 * 1024 * 1024 - 1)
-#define IN_IO_SIZE (20 * 1024 * 1024 - 1)
-#define IN_CFG0_SIZE (12 * 1024 * 1024 - 1)
-#define IN_CFG1_SIZE (12 * 1024 * 1024 - 1)
-#define IN_MSG_SIZE (12 * 1024 * 1024 - 1)
-/* Keep default BAR size as 4K*/
-/* AORAM would be mapped by default*/
-#define INBOUND_ADDR_MASK (SPEAR13XX_SYSRAM1_SIZE - 1)
-
-#define INT_TYPE_NO_INT 0
-#define INT_TYPE_INTX 1
-#define INT_TYPE_MSI 2
-struct spear_pcie_gadget_config {
- void __iomem *base;
- void __iomem *va_app_base;
- void __iomem *va_dbi_base;
- char int_type[10];
- ulong requested_msi;
- ulong configured_msi;
- ulong bar0_size;
- ulong bar0_rw_offset;
- void __iomem *va_bar0_address;
-};
-
-struct pcie_gadget_target {
- struct configfs_subsystem subsys;
- struct spear_pcie_gadget_config config;
-};
-
-struct pcie_gadget_target_attr {
- struct configfs_attribute attr;
- ssize_t (*show)(struct spear_pcie_gadget_config *config,
- char *buf);
- ssize_t (*store)(struct spear_pcie_gadget_config *config,
- const char *buf,
- size_t count);
-};
-
-static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
-{
- /* Enable DBI access */
- writel(readl(&app_reg->slv_armisc) | (1 << AXI_OP_DBI_ACCESS_ID),
- &app_reg->slv_armisc);
- writel(readl(&app_reg->slv_awmisc) | (1 << AXI_OP_DBI_ACCESS_ID),
- &app_reg->slv_awmisc);
-
-}
-
-static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
-{
- /* disable DBI access */
- writel(readl(&app_reg->slv_armisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
- &app_reg->slv_armisc);
- writel(readl(&app_reg->slv_awmisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
- &app_reg->slv_awmisc);
-
-}
-
-static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config,
- int where, int size, u32 *val)
-{
- struct pcie_app_reg __iomem *app_reg = config->va_app_base;
- ulong va_address;
-
- /* Enable DBI access */
- enable_dbi_access(app_reg);
-
- va_address = (ulong)config->va_dbi_base + (where & ~0x3);
-
- *val = readl(va_address);
-
- if (size == 1)
- *val = (*val >> (8 * (where & 3))) & 0xff;
- else if (size == 2)
- *val = (*val >> (8 * (where & 3))) & 0xffff;
-
- /* Disable DBI access */
- disable_dbi_access(app_reg);
-}
-
-static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config,
- int where, int size, u32 val)
-{
- struct pcie_app_reg __iomem *app_reg = config->va_app_base;
- ulong va_address;
-
- /* Enable DBI access */
- enable_dbi_access(app_reg);
-
- va_address = (ulong)config->va_dbi_base + (where & ~0x3);
-
- if (size == 4)
- writel(val, va_address);
- else if (size == 2)
- writew(val, va_address + (where & 2));
- else if (size == 1)
- writeb(val, va_address + (where & 3));
-
- /* Disable DBI access */
- disable_dbi_access(app_reg);
-}
-
-#define PCI_FIND_CAP_TTL 48
-
-static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config,
- u32 pos, int cap, int *ttl)
-{
- u32 id;
-
- while ((*ttl)--) {
- spear_dbi_read_reg(config, pos, 1, &pos);
- if (pos < 0x40)
- break;
- pos &= ~3;
- spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id);
- if (id == 0xff)
- break;
- if (id == cap)
- return pos;
- pos += PCI_CAP_LIST_NEXT;
- }
- return 0;
-}
-
-static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config,
- u32 pos, int cap)
-{
- int ttl = PCI_FIND_CAP_TTL;
-
- return pci_find_own_next_cap_ttl(config, pos, cap, &ttl);
-}
-
-static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config,
- u8 hdr_type)
-{
- u32 status;
-
- spear_dbi_read_reg(config, PCI_STATUS, 2, &status);
- if (!(status & PCI_STATUS_CAP_LIST))
- return 0;
-
- switch (hdr_type) {
- case PCI_HEADER_TYPE_NORMAL:
- case PCI_HEADER_TYPE_BRIDGE:
- return PCI_CAPABILITY_LIST;
- case PCI_HEADER_TYPE_CARDBUS:
- return PCI_CB_CAPABILITY_LIST;
- default:
- return 0;
- }
-
- return 0;
-}
-
-/*
- * Tell if a device supports a given PCI capability.
- * Returns the address of the requested capability structure within the
- * device's PCI configuration space or 0 in case the device does not
- * support it. Possible values for @cap:
- *
- * %PCI_CAP_ID_PM Power Management
- * %PCI_CAP_ID_AGP Accelerated Graphics Port
- * %PCI_CAP_ID_VPD Vital Product Data
- * %PCI_CAP_ID_SLOTID Slot Identification
- * %PCI_CAP_ID_MSI Message Signalled Interrupts
- * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
- * %PCI_CAP_ID_PCIX PCI-X
- * %PCI_CAP_ID_EXP PCI Express
- */
-static int pci_find_own_capability(struct spear_pcie_gadget_config *config,
- int cap)
-{
- u32 pos;
- u32 hdr_type;
-
- spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type);
-
- pos = pci_find_own_cap_start(config, hdr_type);
- if (pos)
- pos = pci_find_own_next_cap(config, pos, cap);
-
- return pos;
-}
-
-static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id)
-{
- return 0;
-}
-
-/*
- * configfs interfaces show/store functions
- */
-
-static struct pcie_gadget_target *to_target(struct config_item *item)
-{
- return item ?
- container_of(to_configfs_subsystem(to_config_group(item)),
- struct pcie_gadget_target, subsys) : NULL;
-}
-
-static ssize_t pcie_gadget_link_show(struct config_item *item, char *buf)
-{
- struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
-
- if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID))
- return sprintf(buf, "UP");
- else
- return sprintf(buf, "DOWN");
-}
-
-static ssize_t pcie_gadget_link_store(struct config_item *item,
- const char *buf, size_t count)
-{
- struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
-
- if (sysfs_streq(buf, "UP"))
- writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID),
- &app_reg->app_ctrl_0);
- else if (sysfs_streq(buf, "DOWN"))
- writel(readl(&app_reg->app_ctrl_0)
- & ~(1 << APP_LTSSM_ENABLE_ID),
- &app_reg->app_ctrl_0);
- else
- return -EINVAL;
- return count;
-}
-
-static ssize_t pcie_gadget_int_type_show(struct config_item *item, char *buf)
-{
- return sprintf(buf, "%s", to_target(item)->int_type);
-}
-
-static ssize_t pcie_gadget_int_type_store(struct config_item *item,
- const char *buf, size_t count)
-{
-	struct spear_pcie_gadget_config *config = to_target(item);
- u32 cap, vec, flags;
- ulong vector;
-
- if (sysfs_streq(buf, "INTA"))
- spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
-
- else if (sysfs_streq(buf, "MSI")) {
- vector = config->requested_msi;
- vec = 0;
- while (vector > 1) {
- vector /= 2;
- vec++;
- }
- spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0);
- cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
- spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
- flags &= ~PCI_MSI_FLAGS_QMASK;
- flags |= vec << 1;
- spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags);
- } else
- return -EINVAL;
-
- strcpy(config->int_type, buf);
-
- return count;
-}
-
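The divide-by-two loop above computes floor(log2(requested_msi)) before folding it into the MSI flags byte. The kernel's ilog2() expresses the same step; a sketch, assuming requested_msi >= 1:

    #include <linux/log2.h>
    #include <linux/pci.h>

    /* Sketch: encode the multiple-message count as floor(log2(n)),
     * matching the divide-by-two loop above. */
    static u32 msi_encode_mme(u32 flags, unsigned long requested_msi)
    {
    	u32 vec = ilog2(requested_msi);	/* assumes requested_msi >= 1 */

    	return (flags & ~PCI_MSI_FLAGS_QMASK) | (vec << 1);
    }
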
-static ssize_t pcie_gadget_no_of_msi_show(struct config_item *item, char *buf)
-{
-	struct spear_pcie_gadget_config *config = to_target(item);
- struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
- u32 cap, vec, flags;
- ulong vector;
-
- if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID))
- != (1 << CFG_MSI_EN_ID))
- vector = 0;
- else {
- cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
- spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
- flags &= ~PCI_MSI_FLAGS_QSIZE;
- vec = flags >> 4;
- vector = 1;
- while (vec--)
- vector *= 2;
- }
- config->configured_msi = vector;
-
- return sprintf(buf, "%lu", vector);
-}
-
-static ssize_t pcie_gadget_no_of_msi_store(struct config_item *item,
- const char *buf, size_t count)
-{
- int ret;
-
- ret = kstrtoul(buf, 0, &to_target(item)->requested_msi);
- if (ret)
- return ret;
-
-	if (to_target(item)->requested_msi > 32)
-		to_target(item)->requested_msi = 32;
-
- return count;
-}
-
-static ssize_t pcie_gadget_inta_store(struct config_item *item,
- const char *buf, size_t count)
-{
- struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
- ulong en;
- int ret;
-
- ret = kstrtoul(buf, 0, &en);
- if (ret)
- return ret;
-
- if (en)
- writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID),
- &app_reg->app_ctrl_0);
- else
- writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID),
- &app_reg->app_ctrl_0);
-
- return count;
-}
-
-static ssize_t pcie_gadget_send_msi_store(struct config_item *item,
- const char *buf, size_t count)
-{
-	struct spear_pcie_gadget_config *config = to_target(item);
- struct pcie_app_reg __iomem *app_reg = config->va_app_base;
- ulong vector;
- u32 ven_msi;
- int ret;
-
- ret = kstrtoul(buf, 0, &vector);
- if (ret)
- return ret;
-
- if (!config->configured_msi)
- return -EINVAL;
-
- if (vector >= config->configured_msi)
- return -EINVAL;
-
- ven_msi = readl(&app_reg->ven_msi_1);
- ven_msi &= ~VEN_MSI_FUN_NUM_MASK;
- ven_msi |= 0 << VEN_MSI_FUN_NUM_ID;
- ven_msi &= ~VEN_MSI_TC_MASK;
- ven_msi |= 0 << VEN_MSI_TC_ID;
- ven_msi &= ~VEN_MSI_VECTOR_MASK;
- ven_msi |= vector << VEN_MSI_VECTOR_ID;
-
- /* generating interrupt for msi vector */
- ven_msi |= VEN_MSI_REQ_EN;
- writel(ven_msi, &app_reg->ven_msi_1);
- udelay(1);
- ven_msi &= ~VEN_MSI_REQ_EN;
- writel(ven_msi, &app_reg->ven_msi_1);
-
- return count;
-}
-
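send_msi repacks the function number, traffic class and vector into ven_msi_1 and then pulses VEN_MSI_REQ_EN for about a microsecond so that exactly one interrupt fires. Assuming the *_MASK macros are contiguous bitfields, the packing can equivalently use FIELD_PREP() (sketch only):

    #include <linux/bitfield.h>

    /* Sketch: rebuild ven_msi_1 for a single MSI shot; function number
     * and traffic class stay zero, as in the store handler above. */
    static u32 ven_msi_pack(u32 ven_msi, unsigned long vector)
    {
    	ven_msi &= ~(VEN_MSI_FUN_NUM_MASK | VEN_MSI_TC_MASK |
    		     VEN_MSI_VECTOR_MASK);
    	return ven_msi | FIELD_PREP(VEN_MSI_VECTOR_MASK, vector);
    }
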
-static ssize_t pcie_gadget_vendor_id_show(struct config_item *item, char *buf)
-{
- u32 id;
-
- spear_dbi_read_reg(to_target(item), PCI_VENDOR_ID, 2, &id);
-
- return sprintf(buf, "%x", id);
-}
-
-static ssize_t pcie_gadget_vendor_id_store(struct config_item *item,
- const char *buf, size_t count)
-{
- ulong id;
- int ret;
-
- ret = kstrtoul(buf, 0, &id);
- if (ret)
- return ret;
-
- spear_dbi_write_reg(to_target(item), PCI_VENDOR_ID, 2, id);
-
- return count;
-}
-
-static ssize_t pcie_gadget_device_id_show(struct config_item *item, char *buf)
-{
- u32 id;
-
- spear_dbi_read_reg(to_target(item), PCI_DEVICE_ID, 2, &id);
-
- return sprintf(buf, "%x", id);
-}
-
-static ssize_t pcie_gadget_device_id_store(struct config_item *item,
- const char *buf, size_t count)
-{
- ulong id;
- int ret;
-
- ret = kstrtoul(buf, 0, &id);
- if (ret)
- return ret;
-
- spear_dbi_write_reg(to_target(item), PCI_DEVICE_ID, 2, id);
-
- return count;
-}
-
-static ssize_t pcie_gadget_bar0_size_show(struct config_item *item, char *buf)
-{
- return sprintf(buf, "%lx", to_target(item)->bar0_size);
-}
-
-static ssize_t pcie_gadget_bar0_size_store(struct config_item *item,
- const char *buf, size_t count)
-{
-	struct spear_pcie_gadget_config *config = to_target(item);
- ulong size;
- u32 pos, pos1;
- u32 no_of_bit = 0;
- int ret;
-
- ret = kstrtoul(buf, 0, &size);
- if (ret)
- return ret;
-
- /* min bar size is 256 */
- if (size <= 0x100)
- size = 0x100;
- /* max bar size is 1MB*/
- else if (size >= 0x100000)
- size = 0x100000;
- else {
- pos = 0;
- pos1 = 0;
- while (pos < 21) {
- pos = find_next_bit((ulong *)&size, 21, pos);
- if (pos != 21)
- pos1 = pos + 1;
- pos++;
- no_of_bit++;
- }
- if (no_of_bit == 2)
- pos1--;
-
- size = 1 << pos1;
- }
- config->bar0_size = size;
- spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1);
-
- return count;
-}
-
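The find_next_bit() loop above clamps the requested size to [256 B, 1 MB] and forces it to a power of two, so that size - 1 written to PCIE_BAR0_MASK_REG is a valid address mask. A simplified sketch of the policy (the original special-cases exact two-bit values; roundup_pow_of_two() always rounds up):

    #include <linux/kernel.h>
    #include <linux/log2.h>

    /* Sketch: clamp to [256 B, 1 MB], then round to a power of two. */
    static ulong bar0_size_fixup(ulong size)
    {
    	size = clamp(size, 0x100UL, 0x100000UL);
    	return roundup_pow_of_two(size);
    }
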
-static ssize_t pcie_gadget_bar0_address_show(struct config_item *item,
- char *buf)
-{
- struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
-
- u32 address = readl(&app_reg->pim0_mem_addr_start);
-
- return sprintf(buf, "%x", address);
-}
-
-static ssize_t pcie_gadget_bar0_address_store(struct config_item *item,
- const char *buf, size_t count)
-{
-	struct spear_pcie_gadget_config *config = to_target(item);
- struct pcie_app_reg __iomem *app_reg = config->va_app_base;
- ulong address;
- int ret;
-
- ret = kstrtoul(buf, 0, &address);
- if (ret)
- return ret;
-
- address &= ~(config->bar0_size - 1);
- if (config->va_bar0_address)
- iounmap(config->va_bar0_address);
- config->va_bar0_address = ioremap(address, config->bar0_size);
- if (!config->va_bar0_address)
- return -ENOMEM;
-
- writel(address, &app_reg->pim0_mem_addr_start);
-
- return count;
-}
-
-static ssize_t pcie_gadget_bar0_rw_offset_show(struct config_item *item,
- char *buf)
-{
- return sprintf(buf, "%lx", to_target(item)->bar0_rw_offset);
-}
-
-static ssize_t pcie_gadget_bar0_rw_offset_store(struct config_item *item,
- const char *buf, size_t count)
-{
- ulong offset;
- int ret;
-
- ret = kstrtoul(buf, 0, &offset);
- if (ret)
- return ret;
-
- if (offset % 4)
- return -EINVAL;
-
- to_target(item)->bar0_rw_offset = offset;
-
- return count;
-}
-
-static ssize_t pcie_gadget_bar0_data_show(struct config_item *item, char *buf)
-{
-	struct spear_pcie_gadget_config *config = to_target(item);
- ulong data;
-
- if (!config->va_bar0_address)
- return -ENOMEM;
-
- data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset);
-
- return sprintf(buf, "%lx", data);
-}
-
-static ssize_t pcie_gadget_bar0_data_store(struct config_item *item,
- const char *buf, size_t count)
-{
-	struct spear_pcie_gadget_config *config = to_target(item);
- ulong data;
- int ret;
-
- ret = kstrtoul(buf, 0, &data);
- if (ret)
- return ret;
-
- if (!config->va_bar0_address)
- return -ENOMEM;
-
- writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset);
-
- return count;
-}
-
-CONFIGFS_ATTR(pcie_gadget_, link);
-CONFIGFS_ATTR(pcie_gadget_, int_type);
-CONFIGFS_ATTR(pcie_gadget_, no_of_msi);
-CONFIGFS_ATTR_WO(pcie_gadget_, inta);
-CONFIGFS_ATTR_WO(pcie_gadget_, send_msi);
-CONFIGFS_ATTR(pcie_gadget_, vendor_id);
-CONFIGFS_ATTR(pcie_gadget_, device_id);
-CONFIGFS_ATTR(pcie_gadget_, bar0_size);
-CONFIGFS_ATTR(pcie_gadget_, bar0_address);
-CONFIGFS_ATTR(pcie_gadget_, bar0_rw_offset);
-CONFIGFS_ATTR(pcie_gadget_, bar0_data);
-
-static struct configfs_attribute *pcie_gadget_target_attrs[] = {
- &pcie_gadget_attr_link,
- &pcie_gadget_attr_int_type,
- &pcie_gadget_attr_no_of_msi,
- &pcie_gadget_attr_inta,
- &pcie_gadget_attr_send_msi,
- &pcie_gadget_attr_vendor_id,
- &pcie_gadget_attr_device_id,
- &pcie_gadget_attr_bar0_size,
- &pcie_gadget_attr_bar0_address,
- &pcie_gadget_attr_bar0_rw_offset,
- &pcie_gadget_attr_bar0_data,
- NULL,
-};
-
-static struct config_item_type pcie_gadget_target_type = {
- .ct_attrs = pcie_gadget_target_attrs,
- .ct_owner = THIS_MODULE,
-};
-
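Once configfs_register_subsystem() runs in probe below, these attributes show up as ordinary files. A minimal userspace sketch, assuming configfs is mounted at /config and the controller registered as pcie_gadget.1:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	char state[16] = "";
    	int fd;

    	/* Request link up (paths assumed, see above). */
    	fd = open("/config/pcie_gadget.1/link", O_WRONLY);
    	if (fd < 0)
    		return 1;
    	write(fd, "UP", 2);
    	close(fd);

    	/* Read the link state back. */
    	fd = open("/config/pcie_gadget.1/link", O_RDONLY);
    	if (fd < 0)
    		return 1;
    	read(fd, state, sizeof(state) - 1);
    	close(fd);
    	printf("link: %s\n", state);
    	return 0;
    }
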
-static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
-{
- struct pcie_app_reg __iomem *app_reg = config->va_app_base;
-
-	/* setup registers for outbound translation */
-
- writel(config->base, &app_reg->in0_mem_addr_start);
- writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
- &app_reg->in0_mem_addr_limit);
- writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start);
- writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
- &app_reg->in1_mem_addr_limit);
- writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start);
- writel(app_reg->in_io_addr_start + IN_IO_SIZE,
- &app_reg->in_io_addr_limit);
- writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start);
- writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
- &app_reg->in_cfg0_addr_limit);
- writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start);
- writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
- &app_reg->in_cfg1_addr_limit);
- writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start);
- writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
- &app_reg->in_msg_addr_limit);
-
- writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start);
- writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start);
- writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start);
-
-	/* setup registers for inbound translation */
-
- /* Keep AORAM mapped at BAR0 as default */
- config->bar0_size = INBOUND_ADDR_MASK + 1;
- spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
- spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC);
- config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE,
- config->bar0_size);
-
- writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start);
- writel(0, &app_reg->pim1_mem_addr_start);
- writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit);
-
- writel(0x0, &app_reg->pim_io_addr_start);
- writel(0x0, &app_reg->pim_io_addr_start);
- writel(0x0, &app_reg->pim_rom_addr_start);
-
- writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID)
- | ((u32)1 << REG_TRANSLATION_ENABLE),
- &app_reg->app_ctrl_0);
- /* disable all rx interrupts */
- writel(0, &app_reg->int_mask);
-
-	/* Select INTA as default */
- spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
-}
-
-static int spear_pcie_gadget_probe(struct platform_device *pdev)
-{
- struct resource *res0, *res1;
- unsigned int status = 0;
- int irq;
- struct clk *clk;
- static struct pcie_gadget_target *target;
- struct spear_pcie_gadget_config *config;
- struct config_item *cg_item;
- struct configfs_subsystem *subsys;
-
- target = devm_kzalloc(&pdev->dev, sizeof(*target), GFP_KERNEL);
- if (!target) {
- dev_err(&pdev->dev, "out of memory\n");
- return -ENOMEM;
- }
-
- cg_item = &target->subsys.su_group.cg_item;
- sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
- cg_item->ci_type = &pcie_gadget_target_type;
- config = &target->config;
-
- /* get resource for application registers*/
- res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- config->va_app_base = devm_ioremap_resource(&pdev->dev, res0);
- if (IS_ERR(config->va_app_base)) {
- dev_err(&pdev->dev, "ioremap fail\n");
- return PTR_ERR(config->va_app_base);
- }
-
- /* get resource for dbi registers*/
- res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- config->base = (void __iomem *)res1->start;
-
- config->va_dbi_base = devm_ioremap_resource(&pdev->dev, res1);
- if (IS_ERR(config->va_dbi_base)) {
- dev_err(&pdev->dev, "ioremap fail\n");
- return PTR_ERR(config->va_dbi_base);
- }
-
- platform_set_drvdata(pdev, target);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no update irq?\n");
- return irq;
- }
-
- status = devm_request_irq(&pdev->dev, irq, spear_pcie_gadget_irq,
- 0, pdev->name, NULL);
- if (status) {
- dev_err(&pdev->dev,
- "pcie gadget interrupt IRQ%d already claimed\n", irq);
- return status;
- }
-
- /* Register configfs hooks */
- subsys = &target->subsys;
- config_group_init(&subsys->su_group);
- mutex_init(&subsys->su_mutex);
- status = configfs_register_subsystem(subsys);
- if (status)
- return status;
-
- /*
- * init basic pcie application registers
-	 * do not enable the clock if it is PCIE0. Ideally, all controllers
-	 * should be independent of each other with respect to clocks, but
-	 * PCIE1 and 2 depend on PCIE0, so the PCIE0 clk is provided during
-	 * board init.
- */
- if (pdev->id == 1) {
- /*
-		 * Ideally the CFG clock should also have been enabled here,
-		 * but currently that is done during the board init routine.
- */
- clk = clk_get_sys("pcie1", NULL);
- if (IS_ERR(clk)) {
- pr_err("%s:couldn't get clk for pcie1\n", __func__);
- return PTR_ERR(clk);
- }
- status = clk_enable(clk);
- if (status) {
- pr_err("%s:couldn't enable clk for pcie1\n", __func__);
- return status;
- }
- } else if (pdev->id == 2) {
- /*
-		 * Ideally the CFG clock should also have been enabled here,
-		 * but currently that is done during the board init routine.
- */
- clk = clk_get_sys("pcie2", NULL);
- if (IS_ERR(clk)) {
- pr_err("%s:couldn't get clk for pcie2\n", __func__);
- return PTR_ERR(clk);
- }
- status = clk_enable(clk);
- if (status) {
- pr_err("%s:couldn't enable clk for pcie2\n", __func__);
- return status;
- }
- }
- spear13xx_pcie_device_init(config);
-
- return 0;
-}
-
-static int spear_pcie_gadget_remove(struct platform_device *pdev)
-{
- static struct pcie_gadget_target *target;
-
- target = platform_get_drvdata(pdev);
-
- configfs_unregister_subsystem(&target->subsys);
-
- return 0;
-}
-
-static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
-{
-}
-
-static struct platform_driver spear_pcie_gadget_driver = {
- .probe = spear_pcie_gadget_probe,
- .remove = spear_pcie_gadget_remove,
- .shutdown = spear_pcie_gadget_shutdown,
- .driver = {
- .name = "pcie-gadget-spear",
- .bus = &platform_bus_type
- },
-};
-
-module_platform_driver(spear_pcie_gadget_driver);
-
-MODULE_ALIAS("platform:pcie-gadget-spear");
-MODULE_AUTHOR("Pratyush Anand");
-MODULE_LICENSE("GPL");
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index f257d3812110..11835969e982 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -19,11 +19,150 @@
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/clk.h>
+#include <linux/compat.h>
+#include <linux/highmem.h>
+
+#include <uapi/misc/xilinx_sdfec.h>
#define DEV_NAME_LEN 12
-static struct idr dev_idr;
-static struct mutex dev_idr_lock;
+static DEFINE_IDA(dev_nrs);
+
+/* Xilinx SDFEC Register Map */
+/* CODE_WRI_PROTECT Register */
+#define XSDFEC_CODE_WR_PROTECT_ADDR (0x4)
+
+/* ACTIVE Register */
+#define XSDFEC_ACTIVE_ADDR (0x8)
+#define XSDFEC_IS_ACTIVITY_SET (0x1)
+
+/* AXIS_WIDTH Register */
+#define XSDFEC_AXIS_WIDTH_ADDR (0xC)
+#define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
+#define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
+#define XSDFEC_AXIS_DIN_WORDS_LSB (2)
+#define XSDFEC_AXIS_DIN_WIDTH_LSB (0)
+
+/* AXIS_ENABLE Register */
+#define XSDFEC_AXIS_ENABLE_ADDR (0x10)
+#define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38)
+#define XSDFEC_AXIS_IN_ENABLE_MASK (0x7)
+#define XSDFEC_AXIS_ENABLE_MASK \
+ (XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK)
+
+/* FEC_CODE Register */
+#define XSDFEC_FEC_CODE_ADDR (0x14)
+
+/* ORDER Register Map */
+#define XSDFEC_ORDER_ADDR (0x18)
+
+/* Interrupt Status Register */
+#define XSDFEC_ISR_ADDR (0x1C)
+/* Interrupt Status Register Bit Mask */
+#define XSDFEC_ISR_MASK (0x3F)
+
+/* Write Only - Interrupt Enable Register */
+#define XSDFEC_IER_ADDR (0x20)
+/* Write Only - Interrupt Disable Register */
+#define XSDFEC_IDR_ADDR (0x24)
+/* Read Only - Interrupt Mask Register */
+#define XSDFEC_IMR_ADDR (0x28)
+
+/* ECC Interrupt Status Register */
+#define XSDFEC_ECC_ISR_ADDR (0x2C)
+/* Single Bit Errors */
+#define XSDFEC_ECC_ISR_SBE_MASK (0x7FF)
+/* PL Initialize Single Bit Errors */
+#define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000)
+/* Multi Bit Errors */
+#define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800)
+/* PL Initialize Multi Bit Errors */
+#define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000)
+/* Multi Bit Error to Event Shift */
+#define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11)
+/* PL Initialize Multi Bit Error to Event Shift */
+#define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4)
+/* ECC Interrupt Status Bit Mask */
+#define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK)
+/* ECC Interrupt Status PL Initialize Bit Mask */
+#define XSDFEC_PL_INIT_ECC_ISR_MASK \
+ (XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
+/* ECC Interrupt Status All Bit Mask */
+#define XSDFEC_ALL_ECC_ISR_MASK \
+ (XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK)
+/* ECC Interrupt Status Single Bit Errors Mask */
+#define XSDFEC_ALL_ECC_ISR_SBE_MASK \
+ (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK)
+/* ECC Interrupt Status Multi Bit Errors Mask */
+#define XSDFEC_ALL_ECC_ISR_MBE_MASK \
+ (XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
+
+/* Write Only - ECC Interrupt Enable Register */
+#define XSDFEC_ECC_IER_ADDR (0x30)
+/* Write Only - ECC Interrupt Disable Register */
+#define XSDFEC_ECC_IDR_ADDR (0x34)
+/* Read Only - ECC Interrupt Mask Register */
+#define XSDFEC_ECC_IMR_ADDR (0x38)
+
+/* BYPASS Register */
+#define XSDFEC_BYPASS_ADDR (0x3C)
+
+/* Turbo Code Register */
+#define XSDFEC_TURBO_ADDR (0x100)
+#define XSDFEC_TURBO_SCALE_MASK (0xFFF)
+#define XSDFEC_TURBO_SCALE_BIT_POS (8)
+#define XSDFEC_TURBO_SCALE_MAX (15)
+
+/* REG0 Register */
+#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000)
+#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0)
+#define XSDFEC_REG0_N_MIN (4)
+#define XSDFEC_REG0_N_MAX (32768)
+#define XSDFEC_REG0_N_MUL_P (256)
+#define XSDFEC_REG0_N_LSB (0)
+#define XSDFEC_REG0_K_MIN (2)
+#define XSDFEC_REG0_K_MAX (32766)
+#define XSDFEC_REG0_K_MUL_P (256)
+#define XSDFEC_REG0_K_LSB (16)
+
+/* REG1 Register */
+#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004)
+#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27f4)
+#define XSDFEC_REG1_PSIZE_MIN (2)
+#define XSDFEC_REG1_PSIZE_MAX (512)
+#define XSDFEC_REG1_NO_PACKING_MASK (0x400)
+#define XSDFEC_REG1_NO_PACKING_LSB (10)
+#define XSDFEC_REG1_NM_MASK (0xFF800)
+#define XSDFEC_REG1_NM_LSB (11)
+#define XSDFEC_REG1_BYPASS_MASK (0x100000)
+
+/* REG2 Register */
+#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008)
+#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27f8)
+#define XSDFEC_REG2_NLAYERS_MIN (1)
+#define XSDFEC_REG2_NLAYERS_MAX (256)
+#define XSDFEC_REG2_NNMQC_MASK (0xFFE00)
+#define XSDFEC_REG2_NMQC_LSB (9)
+#define XSDFEC_REG2_NORM_TYPE_MASK (0x100000)
+#define XSDFEC_REG2_NORM_TYPE_LSB (20)
+#define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000)
+#define XSDFEC_REG2_SPEICAL_QC_LSB (21)
+#define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000)
+#define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
+#define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000)
+#define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)
+
+/* REG3 Register */
+#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C)
+#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC)
+#define XSDFEC_REG3_LA_OFF_LSB (8)
+#define XSDFEC_REG3_QC_OFF_LSB (16)
+
+#define XSDFEC_LDPC_REG_JUMP (0x10)
+#define XSDFEC_REG_WIDTH_JUMP (4)
+
+/* The maximum number of pinned pages */
+#define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1)
/**
* struct xsdfec_clks - For managing SD-FEC clocks
@@ -49,31 +188,1043 @@ struct xsdfec_clks {
/**
* struct xsdfec_dev - Driver data for SDFEC
- * @regs: device physical base address
- * @dev: pointer to device struct
* @miscdev: Misc device handle
- * @error_data_lock: Error counter and states spinlock
* @clks: Clocks managed by the SDFEC driver
+ * @waitq: Driver wait queue
+ * @config: Configuration of the SDFEC device
* @dev_name: Device name
+ * @flags: spinlock flags
+ * @regs: device physical base address
+ * @dev: pointer to device struct
+ * @state: State of the SDFEC device
+ * @error_data_lock: Error counter and states spinlock
* @dev_id: Device ID
+ * @isr_err_count: Count of ISR errors
+ * @cecc_count: Count of Correctable ECC errors (SBE)
+ * @uecc_count: Count of Uncorrectable ECC errors (MBE)
+ * @irq: IRQ number
+ * @state_updated: indicates State updated by interrupt handler
+ * @stats_updated: indicates Stats updated by interrupt handler
+ * @intr_enabled: indicates IRQ enabled
*
* This structure contains necessary state for SDFEC driver to operate
*/
struct xsdfec_dev {
+ struct miscdevice miscdev;
+ struct xsdfec_clks clks;
+ wait_queue_head_t waitq;
+ struct xsdfec_config config;
+ char dev_name[DEV_NAME_LEN];
+ unsigned long flags;
void __iomem *regs;
struct device *dev;
- struct miscdevice miscdev;
+ enum xsdfec_state state;
/* Spinlock to protect state_updated and stats_updated */
spinlock_t error_data_lock;
- struct xsdfec_clks clks;
- char dev_name[DEV_NAME_LEN];
int dev_id;
+ u32 isr_err_count;
+ u32 cecc_count;
+ u32 uecc_count;
+ int irq;
+ bool state_updated;
+ bool stats_updated;
+ bool intr_enabled;
};
+static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
+ u32 value)
+{
+ dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
+ iowrite32(value, xsdfec->regs + addr);
+}
+
+static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
+{
+ u32 rval;
+
+ rval = ioread32(xsdfec->regs + addr);
+ dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
+ return rval;
+}
+
+static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
+ u32 reg_offset, u32 bit_num,
+ char *config_value)
+{
+ u32 reg_val;
+ u32 bit_mask = 1 << bit_num;
+
+ reg_val = xsdfec_regread(xsdfec, reg_offset);
+ *config_value = (reg_val & bit_mask) > 0;
+}
+
+static void update_config_from_hw(struct xsdfec_dev *xsdfec)
+{
+ u32 reg_value;
+ bool sdfec_started;
+
+ /* Update the Order */
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR);
+ xsdfec->config.order = reg_value;
+
+ update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR,
+ 0, /* Bit Number, maybe change to mask */
+ &xsdfec->config.bypass);
+
+ update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR,
+ 0, /* Bit Number */
+ &xsdfec->config.code_wr_protect);
+
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
+ xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0;
+
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
+ xsdfec->config.irq.enable_ecc_isr =
+ (reg_value & XSDFEC_ECC_ISR_MASK) > 0;
+
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
+ sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0;
+ if (sdfec_started)
+ xsdfec->state = XSDFEC_STARTED;
+ else
+ xsdfec->state = XSDFEC_STOPPED;
+}
+
+static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ struct xsdfec_status status;
+ int err;
+
+ memset(&status, 0, sizeof(status));
+ spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+ status.state = xsdfec->state;
+ xsdfec->state_updated = false;
+ spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+ status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
+ XSDFEC_IS_ACTIVITY_SET);
+
+ err = copy_to_user(arg, &status, sizeof(status));
+ if (err)
+ err = -EFAULT;
+
+ return err;
+}
+
+static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ int err;
+
+ err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
+ if (err)
+ err = -EFAULT;
+
+ return err;
+}
+
+static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
+{
+ u32 mask_read;
+
+ if (enable) {
+ /* Enable */
+ xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
+ if (mask_read & XSDFEC_ISR_MASK) {
+ dev_dbg(xsdfec->dev,
+ "SDFEC enabling irq with IER failed");
+ return -EIO;
+ }
+ } else {
+ /* Disable */
+ xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
+ if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
+ dev_dbg(xsdfec->dev,
+ "SDFEC disabling irq with IDR failed");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
+{
+ u32 mask_read;
+
+ if (enable) {
+ /* Enable */
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
+ XSDFEC_ALL_ECC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
+ if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) {
+ dev_dbg(xsdfec->dev,
+ "SDFEC enabling ECC irq with ECC IER failed");
+ return -EIO;
+ }
+ } else {
+ /* Disable */
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
+ XSDFEC_ALL_ECC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
+ if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
+ XSDFEC_ECC_ISR_MASK) ||
+ ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
+ XSDFEC_PL_INIT_ECC_ISR_MASK))) {
+ dev_dbg(xsdfec->dev,
+ "SDFEC disable ECC irq with ECC IDR failed");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
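IER and IDR are write-only and IMR is the read-only view of the live mask, so both branches above verify each write by reading IMR back, failing with -EIO when the hardware did not latch it. Condensed to its invariant (fragment, not compilable on its own):

    /* After IER no ISR bits may remain masked; after IDR all must be. */
    mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
    if (enable ? (mask_read & XSDFEC_ISR_MASK) != 0
    	   : (mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK)
    	return -EIO;
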
+static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ struct xsdfec_irq irq;
+ int err;
+ int isr_err;
+ int ecc_err;
+
+ err = copy_from_user(&irq, arg, sizeof(irq));
+ if (err)
+ return -EFAULT;
+
+ /* Setup tlast related IRQ */
+ isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
+ if (!isr_err)
+ xsdfec->config.irq.enable_isr = irq.enable_isr;
+
+ /* Setup ECC related IRQ */
+ ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
+ if (!ecc_err)
+ xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;
+
+ if (isr_err < 0 || ecc_err < 0)
+ err = -EIO;
+
+ return err;
+}
+
+static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ struct xsdfec_turbo turbo;
+ int err;
+ u32 turbo_write;
+
+ err = copy_from_user(&turbo, arg, sizeof(turbo));
+ if (err)
+ return -EFAULT;
+
+ if (turbo.alg >= XSDFEC_TURBO_ALG_MAX)
+ return -EINVAL;
+
+ if (turbo.scale > XSDFEC_TURBO_SCALE_MAX)
+ return -EINVAL;
+
+ /* Check to see what device tree says about the FEC codes */
+ if (xsdfec->config.code == XSDFEC_LDPC_CODE)
+ return -EIO;
+
+ turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK)
+ << XSDFEC_TURBO_SCALE_BIT_POS) |
+ turbo.alg;
+ xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
+ return err;
+}
+
+static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ u32 reg_value;
+ struct xsdfec_turbo turbo_params;
+ int err;
+
+ if (xsdfec->config.code == XSDFEC_LDPC_CODE)
+ return -EIO;
+
+ memset(&turbo_params, 0, sizeof(turbo_params));
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
+
+ turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
+ XSDFEC_TURBO_SCALE_BIT_POS;
+ turbo_params.alg = reg_value & 0x1;
+
+ err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
+ if (err)
+ err = -EFAULT;
+
+ return err;
+}
+
+static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
+ u32 offset)
+{
+ u32 wdata;
+
+ if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || psize == 0 ||
+ (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) {
+ dev_dbg(xsdfec->dev, "N value is not in range");
+ return -EINVAL;
+ }
+ n <<= XSDFEC_REG0_N_LSB;
+
+ if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX ||
+ (k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) {
+ dev_dbg(xsdfec->dev, "K value is not in range");
+ return -EINVAL;
+ }
+ k = k << XSDFEC_REG0_K_LSB;
+ wdata = k | n;
+
+ if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
+ XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
+ dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
+ XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP));
+ return -EINVAL;
+ }
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP),
+ wdata);
+ return 0;
+}
+
+static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
+ u32 no_packing, u32 nm, u32 offset)
+{
+ u32 wdata;
+
+ if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) {
+ dev_dbg(xsdfec->dev, "Psize is not in range");
+ return -EINVAL;
+ }
+
+ if (no_packing != 0 && no_packing != 1)
+ dev_dbg(xsdfec->dev, "No-packing bit register invalid");
+ no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
+ XSDFEC_REG1_NO_PACKING_MASK);
+
+ if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
+ dev_dbg(xsdfec->dev, "NM is beyond 10 bits");
+ nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;
+
+ wdata = nm | no_packing | psize;
+ if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
+ XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
+ dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x",
+ XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP));
+ return -EINVAL;
+ }
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP),
+ wdata);
+ return 0;
+}
+
+static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
+ u32 norm_type, u32 special_qc, u32 no_final_parity,
+ u32 max_schedule, u32 offset)
+{
+ u32 wdata;
+
+ if (nlayers < XSDFEC_REG2_NLAYERS_MIN ||
+ nlayers > XSDFEC_REG2_NLAYERS_MAX) {
+ dev_dbg(xsdfec->dev, "Nlayers is not in range");
+ return -EINVAL;
+ }
+
+ if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
+ dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits");
+ nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;
+
+ if (norm_type > 1)
+ dev_dbg(xsdfec->dev, "Norm type is invalid");
+ norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
+ XSDFEC_REG2_NORM_TYPE_MASK);
+ if (special_qc > 1)
+ dev_dbg(xsdfec->dev, "Special QC in invalid");
+ special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) &
+ XSDFEC_REG2_SPECIAL_QC_MASK);
+
+ if (no_final_parity > 1)
+ dev_dbg(xsdfec->dev, "No final parity check invalid");
+ no_final_parity =
+ ((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
+ XSDFEC_REG2_NO_FINAL_PARITY_MASK);
+ if (max_schedule &
+ ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB))
+ dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits");
+ max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
+ XSDFEC_REG2_MAX_SCHEDULE_MASK);
+
+ wdata = (max_schedule | no_final_parity | special_qc | norm_type |
+ nmqc | nlayers);
+
+ if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
+ XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
+ dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x",
+ XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP));
+ return -EINVAL;
+ }
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP),
+ wdata);
+ return 0;
+}
+
+static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off,
+ u16 qc_off, u32 offset)
+{
+ u32 wdata;
+
+ wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
+ (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
+ if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
+ XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
+ dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x",
+ XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP));
+ return -EINVAL;
+ }
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP),
+ wdata);
+ return 0;
+}
+
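All four per-code writers repeat the same pattern: compute base + code_id * XSDFEC_LDPC_REG_JUMP, reject anything past the window's high address, then write. A hypothetical helper (ours, not the driver's) capturing that pattern:

    /* Hypothetical consolidation of the bounds-check-and-write pattern. */
    static int xsdfec_ldpc_reg_write(struct xsdfec_dev *xsdfec, u32 base,
    				 u32 high, u32 code_id, u32 wdata)
    {
    	u32 addr = base + code_id * XSDFEC_LDPC_REG_JUMP;

    	if (addr > high) {
    		dev_dbg(xsdfec->dev,
    			"Writing outside of LDPC reg space 0x%x", addr);
    		return -EINVAL;
    	}
    	xsdfec_regwrite(xsdfec, addr, wdata);
    	return 0;
    }
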
+static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
+ u32 *src_ptr, u32 len, const u32 base_addr,
+ const u32 depth)
+{
+ u32 reg = 0;
+ u32 res;
+ u32 n, i;
+ u32 *addr = NULL;
+ struct page *page[MAX_NUM_PAGES];
+
+ /*
+ * Writes that go beyond the length of
+ * Shared Scale(SC) table should fail
+ */
+ if (offset > depth / XSDFEC_REG_WIDTH_JUMP ||
+ len > depth / XSDFEC_REG_WIDTH_JUMP ||
+ offset + len > depth / XSDFEC_REG_WIDTH_JUMP) {
+ dev_dbg(xsdfec->dev, "Write exceeds SC table length");
+ return -EINVAL;
+ }
+
+ n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE;
+ if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
+ n += 1;
+
+ res = get_user_pages_fast((unsigned long)src_ptr, n, 0, page);
+ if (res < n) {
+ for (i = 0; i < res; i++)
+ put_page(page[i]);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < n; i++) {
+ addr = kmap(page[i]);
+ do {
+ xsdfec_regwrite(xsdfec,
+ base_addr + ((offset + reg) *
+ XSDFEC_REG_WIDTH_JUMP),
+ addr[reg]);
+ reg++;
+ } while ((reg < len) &&
+ ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
+ put_page(page[i]);
+ }
+ return reg;
+}
+
+static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ struct xsdfec_ldpc_params *ldpc;
+ int ret, n;
+
+ ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
+ if (!ldpc)
+ return -ENOMEM;
+
+ if (copy_from_user(ldpc, arg, sizeof(*ldpc))) {
+ ret = -EFAULT;
+ goto err_out;
+ }
+
+ if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ /* Verify Device has not started */
+ if (xsdfec->state == XSDFEC_STARTED) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ if (xsdfec->config.code_wr_protect) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ /* Write Reg 0 */
+ ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize,
+ ldpc->code_id);
+ if (ret)
+ goto err_out;
+
+ /* Write Reg 1 */
+ ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm,
+ ldpc->code_id);
+ if (ret)
+ goto err_out;
+
+ /* Write Reg 2 */
+ ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
+ ldpc->norm_type, ldpc->special_qc,
+ ldpc->no_final_parity, ldpc->max_schedule,
+ ldpc->code_id);
+ if (ret)
+ goto err_out;
+
+ /* Write Reg 3 */
+ ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off,
+ ldpc->qc_off, ldpc->code_id);
+ if (ret)
+ goto err_out;
+
+ /* Write Shared Codes */
+ n = ldpc->nlayers / 4;
+ if (ldpc->nlayers % 4)
+ n++;
+
+ ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n,
+ XSDFEC_LDPC_SC_TABLE_ADDR_BASE,
+ XSDFEC_SC_TABLE_DEPTH);
+ if (ret < 0)
+ goto err_out;
+
+ ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table,
+ ldpc->nlayers, XSDFEC_LDPC_LA_TABLE_ADDR_BASE,
+ XSDFEC_LA_TABLE_DEPTH);
+ if (ret < 0)
+ goto err_out;
+
+ ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
+ ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE,
+ XSDFEC_QC_TABLE_DEPTH);
+ if (ret > 0)
+ ret = 0;
+err_out:
+ kfree(ldpc);
+ return ret;
+}
+
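The sc_table, la_table and qc_table members of struct xsdfec_ldpc_params are user-space pointers; xsdfec_table_write() pins the backing pages with get_user_pages_fast() and streams them to the core via kmap(). A userspace sketch (header path assumed after `make headers_install`; all parameter values purely illustrative):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <misc/xilinx_sdfec.h>	/* path assumed, see above */

    /* Sketch: register one LDPC code; the values are illustrative. */
    int add_ldpc_code(int fd, unsigned int *sc, unsigned int *la,
    		  unsigned int *qc)
    {
    	struct xsdfec_ldpc_params ldpc;

    	memset(&ldpc, 0, sizeof(ldpc));
    	ldpc.n = 5184;		/* illustrative */
    	ldpc.k = 4320;		/* illustrative */
    	ldpc.psize = 216;	/* illustrative */
    	ldpc.sc_table = sc;	/* user pointers, pinned by the driver */
    	ldpc.la_table = la;
    	ldpc.qc_table = qc;
    	return ioctl(fd, XSDFEC_ADD_LDPC_CODE_PARAMS, &ldpc);
    }
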
+static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ bool order_invalid;
+ enum xsdfec_order order;
+ int err;
+
+ err = get_user(order, (enum xsdfec_order *)arg);
+ if (err)
+ return -EFAULT;
+
+ order_invalid = (order != XSDFEC_MAINTAIN_ORDER) &&
+ (order != XSDFEC_OUT_OF_ORDER);
+ if (order_invalid)
+ return -EINVAL;
+
+ /* Verify Device has not started */
+ if (xsdfec->state == XSDFEC_STARTED)
+ return -EIO;
+
+ xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);
+
+ xsdfec->config.order = order;
+
+ return 0;
+}
+
+static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
+{
+ bool bypass;
+ int err;
+
+ err = get_user(bypass, arg);
+ if (err)
+ return -EFAULT;
+
+ /* Verify Device has not started */
+ if (xsdfec->state == XSDFEC_STARTED)
+ return -EIO;
+
+ if (bypass)
+ xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
+ else
+ xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);
+
+ xsdfec->config.bypass = bypass;
+
+ return 0;
+}
+
+static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
+{
+ u32 reg_value;
+ bool is_active;
+ int err;
+
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
+ /* using a double ! operator instead of casting */
+ is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
+ err = put_user(is_active, arg);
+ if (err)
+ return -EFAULT;
+
+ return err;
+}
+
+static u32
+xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
+{
+ u32 axis_width_field = 0;
+
+ switch (axis_width_cfg) {
+ case XSDFEC_1x128b:
+ axis_width_field = 0;
+ break;
+ case XSDFEC_2x128b:
+ axis_width_field = 1;
+ break;
+ case XSDFEC_4x128b:
+ axis_width_field = 2;
+ break;
+ }
+
+ return axis_width_field;
+}
+
+static u32 xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include
+ axis_word_inc_cfg)
+{
+ u32 axis_words_field = 0;
+
+ if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
+ axis_word_inc_cfg == XSDFEC_IN_BLOCK)
+ axis_words_field = 0;
+ else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
+ axis_words_field = 1;
+
+ return axis_words_field;
+}
+
+static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
+{
+ u32 reg_value;
+ u32 dout_words_field;
+ u32 dout_width_field;
+ u32 din_words_field;
+ u32 din_width_field;
+ struct xsdfec_config *config = &xsdfec->config;
+
+ /* translate config info to register values */
+ dout_words_field =
+ xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
+ dout_width_field =
+ xsdfec_translate_axis_width_cfg_val(config->dout_width);
+ din_words_field =
+ xsdfec_translate_axis_words_cfg_val(config->din_word_include);
+ din_width_field =
+ xsdfec_translate_axis_width_cfg_val(config->din_width);
+
+ reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
+ reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
+ reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
+ reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;
+
+ xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);
+
+ return 0;
+}
+
+static int xsdfec_dev_open(struct inode *iptr, struct file *fptr)
+{
+ return 0;
+}
+
+static int xsdfec_dev_release(struct inode *iptr, struct file *fptr)
+{
+ return 0;
+}
+
+static int xsdfec_start(struct xsdfec_dev *xsdfec)
+{
+ u32 regread;
+
+ regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
+ regread &= 0x1;
+ if (regread != xsdfec->config.code) {
+ dev_dbg(xsdfec->dev,
+ "%s SDFEC HW code does not match driver code, reg %d, code %d",
+ __func__, regread, xsdfec->config.code);
+ return -EINVAL;
+ }
+
+ /* Set AXIS enable */
+ xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
+ XSDFEC_AXIS_ENABLE_MASK);
+ /* Done */
+ xsdfec->state = XSDFEC_STARTED;
+ return 0;
+}
+
+static int xsdfec_stop(struct xsdfec_dev *xsdfec)
+{
+ u32 regread;
+
+ if (xsdfec->state != XSDFEC_STARTED)
+ dev_dbg(xsdfec->dev, "Device not started correctly");
+ /* Disable AXIS_ENABLE Input interfaces only */
+ regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
+ regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK);
+ xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
+ /* Stop */
+ xsdfec->state = XSDFEC_STOPPED;
+ return 0;
+}
+
+static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
+{
+ spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+ xsdfec->isr_err_count = 0;
+ xsdfec->uecc_count = 0;
+ xsdfec->cecc_count = 0;
+ spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+
+ return 0;
+}
+
+static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ int err;
+ struct xsdfec_stats user_stats;
+
+ spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+ user_stats.isr_err_count = xsdfec->isr_err_count;
+ user_stats.cecc_count = xsdfec->cecc_count;
+ user_stats.uecc_count = xsdfec->uecc_count;
+ xsdfec->stats_updated = false;
+ spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+
+ err = copy_to_user(arg, &user_stats, sizeof(user_stats));
+ if (err)
+ err = -EFAULT;
+
+ return err;
+}
+
+static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
+{
+ /* Ensure registers are aligned with core configuration */
+ xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
+ xsdfec_cfg_axi_streams(xsdfec);
+ update_config_from_hw(xsdfec);
+
+ return 0;
+}
+
+static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
+ unsigned long data)
+{
+ struct xsdfec_dev *xsdfec;
+ void __user *arg = NULL;
+ int rval = -EINVAL;
+
+ xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);
+
+ /* In failed state allow only reset and get status IOCTLs */
+ if (xsdfec->state == XSDFEC_NEEDS_RESET &&
+ (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
+ cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) {
+ return -EPERM;
+ }
+
+ if (_IOC_TYPE(cmd) != XSDFEC_MAGIC)
+ return -ENOTTY;
+
+ /* check if ioctl argument is present and valid */
+ if (_IOC_DIR(cmd) != _IOC_NONE) {
+ arg = (void __user *)data;
+ if (!arg)
+ return rval;
+ }
+
+ switch (cmd) {
+ case XSDFEC_START_DEV:
+ rval = xsdfec_start(xsdfec);
+ break;
+ case XSDFEC_STOP_DEV:
+ rval = xsdfec_stop(xsdfec);
+ break;
+ case XSDFEC_CLEAR_STATS:
+ rval = xsdfec_clear_stats(xsdfec);
+ break;
+ case XSDFEC_GET_STATS:
+ rval = xsdfec_get_stats(xsdfec, arg);
+ break;
+ case XSDFEC_GET_STATUS:
+ rval = xsdfec_get_status(xsdfec, arg);
+ break;
+ case XSDFEC_GET_CONFIG:
+ rval = xsdfec_get_config(xsdfec, arg);
+ break;
+ case XSDFEC_SET_DEFAULT_CONFIG:
+ rval = xsdfec_set_default_config(xsdfec);
+ break;
+ case XSDFEC_SET_IRQ:
+ rval = xsdfec_set_irq(xsdfec, arg);
+ break;
+ case XSDFEC_SET_TURBO:
+ rval = xsdfec_set_turbo(xsdfec, arg);
+ break;
+ case XSDFEC_GET_TURBO:
+ rval = xsdfec_get_turbo(xsdfec, arg);
+ break;
+ case XSDFEC_ADD_LDPC_CODE_PARAMS:
+ rval = xsdfec_add_ldpc(xsdfec, arg);
+ break;
+ case XSDFEC_SET_ORDER:
+ rval = xsdfec_set_order(xsdfec, arg);
+ break;
+ case XSDFEC_SET_BYPASS:
+ rval = xsdfec_set_bypass(xsdfec, arg);
+ break;
+ case XSDFEC_IS_ACTIVE:
+ rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
+ break;
+ default:
+ /* Should not get here */
+ break;
+ }
+ return rval;
+}
+
+#ifdef CONFIG_COMPAT
+static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long data)
+{
+ return xsdfec_dev_ioctl(file, cmd, (unsigned long)compat_ptr(data));
+}
+#endif
+
+static unsigned int xsdfec_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct xsdfec_dev *xsdfec;
+
+ xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
+
+ if (!xsdfec)
+ return POLLNVAL | POLLHUP;
+
+ poll_wait(file, &xsdfec->waitq, wait);
+
+ /* XSDFEC ISR detected an error */
+ spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+ if (xsdfec->state_updated)
+ mask |= POLLIN | POLLPRI;
+
+ if (xsdfec->stats_updated)
+ mask |= POLLIN | POLLRDNORM;
+ spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+
+ return mask;
+}
+
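poll() raises POLLPRI when the interrupt thread recorded a state change and POLLRDNORM when the error counters moved, so a monitor can sleep until either happens and then fetch the details. Userspace sketch (header path assumed as above):

    #include <poll.h>
    #include <sys/ioctl.h>
    #include <misc/xilinx_sdfec.h>	/* path assumed */

    /* Sketch: block until a state or stats update, then read it out. */
    int wait_for_sdfec_event(int fd)
    {
    	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLRDNORM };
    	struct xsdfec_status status;
    	struct xsdfec_stats stats;

    	if (poll(&pfd, 1, -1) < 0)
    		return -1;
    	if (pfd.revents & POLLPRI)
    		ioctl(fd, XSDFEC_GET_STATUS, &status);
    	if (pfd.revents & POLLRDNORM)
    		ioctl(fd, XSDFEC_GET_STATS, &stats);
    	return 0;
    }
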
static const struct file_operations xsdfec_fops = {
.owner = THIS_MODULE,
+ .open = xsdfec_dev_open,
+ .release = xsdfec_dev_release,
+ .unlocked_ioctl = xsdfec_dev_ioctl,
+ .poll = xsdfec_poll,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = xsdfec_dev_compat_ioctl,
+#endif
};
+static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
+{
+ struct device *dev = xsdfec->dev;
+ struct device_node *node = dev->of_node;
+ int rval;
+ const char *fec_code;
+ u32 din_width;
+ u32 din_word_include;
+ u32 dout_width;
+ u32 dout_word_include;
+
+ rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
+ if (rval < 0)
+ return rval;
+
+ if (!strcasecmp(fec_code, "ldpc"))
+ xsdfec->config.code = XSDFEC_LDPC_CODE;
+ else if (!strcasecmp(fec_code, "turbo"))
+ xsdfec->config.code = XSDFEC_TURBO_CODE;
+ else
+ return -EINVAL;
+
+ rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
+ &din_word_include);
+ if (rval < 0)
+ return rval;
+
+ if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
+ xsdfec->config.din_word_include = din_word_include;
+ else
+ return -EINVAL;
+
+ rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
+ if (rval < 0)
+ return rval;
+
+ switch (din_width) {
+ /* Fall through and set for valid values */
+ case XSDFEC_1x128b:
+ case XSDFEC_2x128b:
+ case XSDFEC_4x128b:
+ xsdfec->config.din_width = din_width;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
+ &dout_word_include);
+ if (rval < 0)
+ return rval;
+
+ if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
+ xsdfec->config.dout_word_include = dout_word_include;
+ else
+ return -EINVAL;
+
+ rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
+ if (rval < 0)
+ return rval;
+
+ switch (dout_width) {
+ /* Fall through and set for valid values */
+ case XSDFEC_1x128b:
+ case XSDFEC_2x128b:
+ case XSDFEC_4x128b:
+ xsdfec->config.dout_width = dout_width;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Write LDPC to CODE Register */
+ xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
+
+ xsdfec_cfg_axi_streams(xsdfec);
+
+ return 0;
+}
+
+static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
+{
+ struct xsdfec_dev *xsdfec = dev_id;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 ecc_err;
+ u32 isr_err;
+ u32 uecc_count;
+ u32 cecc_count;
+ u32 isr_err_count;
+ u32 aecc_count;
+ u32 tmp;
+
+ WARN_ON(xsdfec->irq != irq);
+
+ /* Mask Interrupts */
+ xsdfec_isr_enable(xsdfec, false);
+ xsdfec_ecc_isr_enable(xsdfec, false);
+ /* Read ISR */
+ ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
+ isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
+ /* Clear the interrupts */
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
+ xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);
+
+ tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
+ /* Count uncorrectable 2-bit errors */
+ uecc_count = hweight32(tmp);
+ /* Count all ECC errors */
+ aecc_count = hweight32(ecc_err);
+ /* Number of correctable 1-bit ECC error */
+ cecc_count = aecc_count - 2 * uecc_count;
+ /* Count ISR errors */
+ isr_err_count = hweight32(isr_err);
+ dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
+ uecc_count, aecc_count, cecc_count, isr_err_count);
+ dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
+ xsdfec->cecc_count, xsdfec->isr_err_count);
+
+ spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+ /* Add new errors to a 2-bits counter */
+ if (uecc_count)
+ xsdfec->uecc_count += uecc_count;
+ /* Add new errors to a 1-bits counter */
+ if (cecc_count)
+ xsdfec->cecc_count += cecc_count;
+ /* Add new errors to a ISR counter */
+ if (isr_err_count)
+ xsdfec->isr_err_count += isr_err_count;
+
+ /* Update state/stats flag */
+ if (uecc_count) {
+ if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
+ xsdfec->state = XSDFEC_NEEDS_RESET;
+ else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
+ xsdfec->state = XSDFEC_PL_RECONFIGURE;
+ xsdfec->stats_updated = true;
+ xsdfec->state_updated = true;
+ }
+
+ if (cecc_count)
+ xsdfec->stats_updated = true;
+
+ if (isr_err_count) {
+ xsdfec->state = XSDFEC_NEEDS_RESET;
+ xsdfec->stats_updated = true;
+ xsdfec->state_updated = true;
+ }
+
+ spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+ dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
+ xsdfec->stats_updated);
+
+ /* Enable another polling */
+ if (xsdfec->state_updated || xsdfec->stats_updated)
+ wake_up_interruptible(&xsdfec->waitq);
+ else
+ ret = IRQ_NONE;
+
+ /* Unmask Interrupts */
+ xsdfec_isr_enable(xsdfec, true);
+ xsdfec_ecc_isr_enable(xsdfec, true);
+
+ return ret;
+}
+
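The counting above relies on each multi-bit error raising two ISR bits (its MBE bit plus the paired single-bit position), so correctable errors fall out as the total population count minus twice the MBE count. Worked sketch:

    /* One standalone SBE (bit 1) plus one MBE (bit 11) that also
     * raised its paired SBE bit (bit 0). */
    u32 ecc_err = BIT(0) | BIT(1) | BIT(11);
    u32 uecc = hweight32(ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK); /* 1 */
    u32 cecc = hweight32(ecc_err) - 2 * uecc;	/* 3 - 2 * 1 = 1 */
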
static int xsdfec_clk_init(struct platform_device *pdev,
struct xsdfec_clks *clks)
{
@@ -227,19 +1378,13 @@ static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
clk_disable_unprepare(clks->axi_clk);
}
-static void xsdfec_idr_remove(struct xsdfec_dev *xsdfec)
-{
- mutex_lock(&dev_idr_lock);
- idr_remove(&dev_idr, xsdfec->dev_id);
- mutex_unlock(&dev_idr_lock);
-}
-
static int xsdfec_probe(struct platform_device *pdev)
{
struct xsdfec_dev *xsdfec;
struct device *dev;
struct resource *res;
int err;
+ bool irq_enabled = true;
xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
if (!xsdfec)
@@ -260,12 +1405,34 @@ static int xsdfec_probe(struct platform_device *pdev)
goto err_xsdfec_dev;
}
+ xsdfec->irq = platform_get_irq(pdev, 0);
+ if (xsdfec->irq < 0) {
+ dev_dbg(dev, "platform_get_irq failed");
+ irq_enabled = false;
+ }
+
+ err = xsdfec_parse_of(xsdfec);
+ if (err < 0)
+ goto err_xsdfec_dev;
+
+ update_config_from_hw(xsdfec);
+
/* Save driver private data */
platform_set_drvdata(pdev, xsdfec);
- mutex_lock(&dev_idr_lock);
- err = idr_alloc(&dev_idr, xsdfec->dev_name, 0, 0, GFP_KERNEL);
- mutex_unlock(&dev_idr_lock);
+ if (irq_enabled) {
+ init_waitqueue_head(&xsdfec->waitq);
+ /* Register IRQ thread */
+ err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
+ xsdfec_irq_thread, IRQF_ONESHOT,
+ "xilinx-sdfec16", xsdfec);
+ if (err < 0) {
+ dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
+ goto err_xsdfec_dev;
+ }
+ }
+
+ err = ida_alloc(&dev_nrs, GFP_KERNEL);
if (err < 0)
goto err_xsdfec_dev;
xsdfec->dev_id = err;
@@ -278,12 +1445,12 @@ static int xsdfec_probe(struct platform_device *pdev)
err = misc_register(&xsdfec->miscdev);
if (err) {
dev_err(dev, "error:%d. Unable to register device", err);
- goto err_xsdfec_idr;
+ goto err_xsdfec_ida;
}
return 0;
-err_xsdfec_idr:
- xsdfec_idr_remove(xsdfec);
+err_xsdfec_ida:
+ ida_free(&dev_nrs, xsdfec->dev_id);
err_xsdfec_dev:
xsdfec_disable_all_clks(&xsdfec->clks);
return err;
@@ -295,7 +1462,7 @@ static int xsdfec_remove(struct platform_device *pdev)
xsdfec = platform_get_drvdata(pdev);
misc_deregister(&xsdfec->miscdev);
- xsdfec_idr_remove(xsdfec);
+ ida_free(&dev_nrs, xsdfec->dev_id);
xsdfec_disable_all_clks(&xsdfec->clks);
return 0;
}
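The idr-to-IDA conversion in these hunks also drops the external mutex and the exit-time idr_destroy(): ida_alloc()/ida_free() lock internally, and a DEFINE_IDA needs no teardown once empty. The resulting pattern, as a sketch:

    #include <linux/idr.h>

    static DEFINE_IDA(dev_nrs);	/* no separate lock required */

    /* Sketch: grab the smallest free device number (< 0 on error). */
    static int example_get_dev_nr(void)
    {
    	return ida_alloc(&dev_nrs, GFP_KERNEL);
    }

    /* ...and on remove: ida_free(&dev_nrs, id); */
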
@@ -321,8 +1488,6 @@ static int __init xsdfec_init(void)
{
int err;
- mutex_init(&dev_idr_lock);
- idr_init(&dev_idr);
err = platform_driver_register(&xsdfec_driver);
if (err < 0) {
pr_err("%s Unabled to register SDFEC driver", __func__);
@@ -334,7 +1499,6 @@ static int __init xsdfec_init(void)
static void __exit xsdfec_exit(void)
{
platform_driver_unregister(&xsdfec_driver);
- idr_destroy(&dev_idr);
}
module_init(xsdfec_init);
diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
index d9dc482ecb2f..61a17f943f47 100644
--- a/drivers/nvmem/imx-ocotp-scu.c
+++ b/drivers/nvmem/imx-ocotp-scu.c
@@ -16,6 +16,7 @@
enum ocotp_devtype {
IMX8QXP,
+ IMX8QM,
};
struct ocotp_devtype_data {
@@ -39,6 +40,11 @@ static struct ocotp_devtype_data imx8qxp_data = {
.nregs = 800,
};
+static struct ocotp_devtype_data imx8qm_data = {
+ .devtype = IMX8QM,
+ .nregs = 800,
+};
+
static int imx_sc_misc_otp_fuse_read(struct imx_sc_ipc *ipc, u32 word,
u32 *val)
{
@@ -118,6 +124,7 @@ static struct nvmem_config imx_scu_ocotp_nvmem_config = {
static const struct of_device_id imx_scu_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx8qxp-scu-ocotp", (void *)&imx8qxp_data },
+ { .compatible = "fsl,imx8qm-scu-ocotp", (void *)&imx8qm_data },
{ },
};
MODULE_DEVICE_TABLE(of, imx_scu_ocotp_dt_ids);
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 42d4451e7d67..dff2f3c357f5 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -479,6 +479,12 @@ static const struct ocotp_params imx8mm_params = {
.set_timing = imx_ocotp_set_imx6_timing,
};
+static const struct ocotp_params imx8mn_params = {
+ .nregs = 256,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
{ .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
@@ -490,6 +496,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params },
{ .compatible = "fsl,imx8mq-ocotp", .data = &imx8mq_params },
{ .compatible = "fsl,imx8mm-ocotp", .data = &imx8mm_params },
+ { .compatible = "fsl,imx8mn-ocotp", .data = &imx8mn_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
index b9f9ce089de9..07c9f38c1c60 100644
--- a/drivers/nvmem/meson-mx-efuse.c
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -155,7 +155,8 @@ static int meson_mx_efuse_read(void *context, unsigned int offset,
if (err)
break;
- memcpy(buf + i, &tmp, efuse->config.word_size);
+ memcpy(buf + i, &tmp,
+ min_t(size_t, bytes - i, efuse->config.word_size));
}
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
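The min_t() clamp matters when bytes is not a multiple of word_size: without it the final memcpy() copies a whole word and overruns the destination. Worked sketch, reading 6 bytes with 4-byte words:

    /* i == 0: copies min(6 - 0, 4) = 4 bytes;
     * i == 4: copies min(6 - 4, 4) = 2 bytes -- previously this
     * second copy wrote 4 bytes past what the caller asked for. */
    for (i = 0; i < bytes; i += word_size)
    	memcpy(buf + i, &tmp, min_t(size_t, bytes - i, word_size));
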
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index c34d9fecfb10..8e4898dec002 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -200,6 +200,6 @@ static struct platform_driver mxs_ocotp_driver = {
};
module_platform_driver(mxs_ocotp_driver);
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net");
MODULE_DESCRIPTION("driver for OCOTP in i.MX23/i.MX28");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index a079a80ddf2c..e26ef1bbf198 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -186,6 +186,7 @@ static const struct sunxi_sid_cfg sun8i_h3_cfg = {
static const struct sunxi_sid_cfg sun50i_a64_cfg = {
.value_offset = 0x200,
.size = 0x100,
+ .need_register_readout = true,
};
static const struct sunxi_sid_cfg sun50i_h6_cfg = {
diff --git a/drivers/parport/Makefile b/drivers/parport/Makefile
index 6fa41f8173b6..022c566c0f32 100644
--- a/drivers/parport/Makefile
+++ b/drivers/parport/Makefile
@@ -19,4 +19,4 @@ obj-$(CONFIG_PARPORT_ATARI) += parport_atari.o
obj-$(CONFIG_PARPORT_SUNBPP) += parport_sunbpp.o
obj-$(CONFIG_PARPORT_GSC) += parport_gsc.o
obj-$(CONFIG_PARPORT_AX88796) += parport_ax88796.o
-obj-$(CONFIG_PARPORT_IP32) += parport_ip32.o \ No newline at end of file
+obj-$(CONFIG_PARPORT_IP32) += parport_ip32.o
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 60d5d985113c..96b888bb49c6 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -680,8 +680,7 @@ static void parport_serial_pci_remove(struct pci_dev *dev)
static int __maybe_unused parport_serial_pci_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct parport_serial_private *priv = pci_get_drvdata(pdev);
+ struct parport_serial_private *priv = dev_get_drvdata(dev);
if (priv->serial)
pciserial_suspend_ports(priv->serial);
@@ -692,8 +691,7 @@ static int __maybe_unused parport_serial_pci_suspend(struct device *dev)
static int __maybe_unused parport_serial_pci_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct parport_serial_private *priv = pci_get_drvdata(pdev);
+ struct parport_serial_private *priv = dev_get_drvdata(dev);
if (priv->serial)
pciserial_resume_ports(priv->serial);
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index ec54a2aa5cb8..245d60189375 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -117,9 +117,9 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i
if (card_present(i)) {
sockets[i].card_state = 3;
- dprintk(KERN_DEBUG "i82092aa: slot %i is occupied\n",i);
+ dev_dbg(&dev->dev, "i82092aa: slot %i is occupied\n", i);
} else {
- dprintk(KERN_DEBUG "i82092aa: slot %i is vacant\n",i);
+ dev_dbg(&dev->dev, "i82092aa: slot %i is vacant\n", i);
}
}
@@ -128,7 +128,7 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i
pci_write_config_byte(dev, 0x50, configbyte); /* PCI Interrupt Routing Register */
/* Register the interrupt handler */
- dprintk(KERN_DEBUG "Requesting interrupt %i \n",dev->irq);
+ dev_dbg(&dev->dev, "Requesting interrupt %i\n", dev->irq);
if ((ret = request_irq(dev->irq, i82092aa_interrupt, IRQF_SHARED, "i82092aa", i82092aa_interrupt))) {
printk(KERN_ERR "i82092aa: Failed to register IRQ %d, aborting\n", dev->irq);
goto err_out_free_res;
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 0d9fddc498a6..c96a1afc95bd 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
obj-$(CONFIG_ARCH_SUNXI) += allwinner/
obj-$(CONFIG_ARCH_MESON) += amlogic/
-obj-$(CONFIG_LANTIQ) += lantiq/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
obj-$(CONFIG_ARCH_RENESAS) += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
@@ -19,6 +18,7 @@ obj-y += broadcom/ \
cadence/ \
freescale/ \
hisilicon/ \
+ lantiq/ \
marvell/ \
motorola/ \
mscc/ \
diff --git a/drivers/phy/lantiq/Kconfig b/drivers/phy/lantiq/Kconfig
index eb66c857ce25..c4df9709d53f 100644
--- a/drivers/phy/lantiq/Kconfig
+++ b/drivers/phy/lantiq/Kconfig
@@ -2,6 +2,17 @@
#
# Phy drivers for Lantiq / Intel platforms
#
+config PHY_LANTIQ_VRX200_PCIE
+ tristate "Lantiq VRX200/ARX300 PCIe PHY"
+ depends on SOC_TYPE_XWAY || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ select GENERIC_PHY
+ select REGMAP_MMIO
+ help
+ Support for the PCIe PHY(s) on the Lantiq / Intel VRX200 and ARX300
+ family SoCs.
+ If unsure, say N.
+
config PHY_LANTIQ_RCU_USB2
tristate "Lantiq XWAY SoC RCU based USB PHY"
depends on OF && (SOC_TYPE_XWAY || COMPILE_TEST)
diff --git a/drivers/phy/lantiq/Makefile b/drivers/phy/lantiq/Makefile
index 540049039092..7c14eb24ab73 100644
--- a/drivers/phy/lantiq/Makefile
+++ b/drivers/phy/lantiq/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_PHY_LANTIQ_RCU_USB2) += phy-lantiq-rcu-usb2.o
+obj-$(CONFIG_PHY_LANTIQ_VRX200_PCIE) += phy-lantiq-vrx200-pcie.o
diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
new file mode 100644
index 000000000000..544d64a84cc0
--- /dev/null
+++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe PHY driver for Lantiq VRX200 and ARX300 SoCs.
+ *
+ * Copyright (C) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * Based on the BSP (called "UGW") driver:
+ * Copyright (C) 2009-2015 Lei Chuanhua <chuanhua.lei@lantiq.com>
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * TODO: PHY modes other than 36MHz (without "SSC")
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <dt-bindings/phy/phy-lantiq-vrx200-pcie.h>
+
+#define PCIE_PHY_PLL_CTRL1 0x44
+
+#define PCIE_PHY_PLL_CTRL2 0x46
+#define PCIE_PHY_PLL_CTRL2_CONST_SDM_MASK GENMASK(7, 0)
+#define PCIE_PHY_PLL_CTRL2_CONST_SDM_EN BIT(8)
+#define PCIE_PHY_PLL_CTRL2_PLL_SDM_EN BIT(9)
+
+#define PCIE_PHY_PLL_CTRL3 0x48
+#define PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_EN BIT(1)
+#define PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_MASK GENMASK(6, 4)
+
+#define PCIE_PHY_PLL_CTRL4 0x4a
+#define PCIE_PHY_PLL_CTRL5 0x4c
+#define PCIE_PHY_PLL_CTRL6 0x4e
+#define PCIE_PHY_PLL_CTRL7 0x50
+#define PCIE_PHY_PLL_A_CTRL1 0x52
+
+#define PCIE_PHY_PLL_A_CTRL2 0x54
+#define PCIE_PHY_PLL_A_CTRL2_LF_MODE_EN BIT(14)
+
+#define PCIE_PHY_PLL_A_CTRL3 0x56
+#define PCIE_PHY_PLL_A_CTRL3_MMD_MASK GENMASK(15, 13)
+
+#define PCIE_PHY_PLL_STATUS 0x58
+
+#define PCIE_PHY_TX1_CTRL1 0x60
+#define PCIE_PHY_TX1_CTRL1_FORCE_EN BIT(3)
+#define PCIE_PHY_TX1_CTRL1_LOAD_EN BIT(4)
+
+#define PCIE_PHY_TX1_CTRL2 0x62
+#define PCIE_PHY_TX1_CTRL3 0x64
+#define PCIE_PHY_TX1_A_CTRL1 0x66
+#define PCIE_PHY_TX1_A_CTRL2 0x68
+#define PCIE_PHY_TX1_MOD1 0x6a
+#define PCIE_PHY_TX1_MOD2 0x6c
+#define PCIE_PHY_TX1_MOD3 0x6e
+
+#define PCIE_PHY_TX2_CTRL1 0x70
+#define PCIE_PHY_TX2_CTRL1_LOAD_EN BIT(4)
+
+#define PCIE_PHY_TX2_CTRL2 0x72
+#define PCIE_PHY_TX2_A_CTRL1 0x76
+#define PCIE_PHY_TX2_A_CTRL2 0x78
+#define PCIE_PHY_TX2_MOD1 0x7a
+#define PCIE_PHY_TX2_MOD2 0x7c
+#define PCIE_PHY_TX2_MOD3 0x7e
+
+#define PCIE_PHY_RX1_CTRL1 0xa0
+#define PCIE_PHY_RX1_CTRL1_LOAD_EN BIT(1)
+
+#define PCIE_PHY_RX1_CTRL2 0xa2
+#define PCIE_PHY_RX1_CDR 0xa4
+#define PCIE_PHY_RX1_EI 0xa6
+#define PCIE_PHY_RX1_A_CTRL 0xaa
+
+struct ltq_vrx200_pcie_phy_priv {
+ struct phy *phy;
+ unsigned int mode;
+ struct device *dev;
+ struct regmap *phy_regmap;
+ struct regmap *rcu_regmap;
+ struct clk *pdi_clk;
+ struct clk *phy_clk;
+ struct reset_control *phy_reset;
+ struct reset_control *pcie_reset;
+ u32 rcu_ahb_endian_offset;
+ u32 rcu_ahb_endian_big_endian_mask;
+};
+
+static void ltq_vrx200_pcie_phy_common_setup(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+
+ /* PLL Setting */
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL1, 0x120e);
+
+ /* increase the bias reference voltage */
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL2, 0x39d7);
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL3, 0x0900);
+
+ /* Endcnt */
+ regmap_write(priv->phy_regmap, PCIE_PHY_RX1_EI, 0x0004);
+ regmap_write(priv->phy_regmap, PCIE_PHY_RX1_A_CTRL, 0x6803);
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_TX1_CTRL1,
+ PCIE_PHY_TX1_CTRL1_FORCE_EN,
+ PCIE_PHY_TX1_CTRL1_FORCE_EN);
+
+ /* predrv_ser_en */
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_A_CTRL2, 0x0706);
+
+ /* ctrl_lim */
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_CTRL3, 0x1fff);
+
+ /* ctrl */
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_A_CTRL1, 0x0810);
+
+ /* predrv_ser_en */
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_TX2_A_CTRL2, 0x7f00,
+ 0x4700);
+
+ /* RTERM */
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_CTRL2, 0x2e00);
+
+ /* Improved 100MHz clock output */
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX2_CTRL2, 0x3096);
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX2_A_CTRL2, 0x4707);
+
+ /* Reduced CDR BW to avoid glitches */
+ regmap_write(priv->phy_regmap, PCIE_PHY_RX1_CDR, 0x0235);
+}
+
+static void pcie_phy_36mhz_mode_setup(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL3,
+ PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_EN, 0x0000);
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL3,
+ PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_MASK, 0x0000);
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL2,
+ PCIE_PHY_PLL_CTRL2_PLL_SDM_EN,
+ PCIE_PHY_PLL_CTRL2_PLL_SDM_EN);
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL2,
+ PCIE_PHY_PLL_CTRL2_CONST_SDM_EN,
+ PCIE_PHY_PLL_CTRL2_CONST_SDM_EN);
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL3,
+ PCIE_PHY_PLL_A_CTRL3_MMD_MASK,
+ FIELD_PREP(PCIE_PHY_PLL_A_CTRL3_MMD_MASK, 0x1));
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL2,
+ PCIE_PHY_PLL_A_CTRL2_LF_MODE_EN, 0x0000);
+
+ /* const_sdm */
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL1, 0x38e4);
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL2,
+ PCIE_PHY_PLL_CTRL2_CONST_SDM_MASK,
+ FIELD_PREP(PCIE_PHY_PLL_CTRL2_CONST_SDM_MASK,
+ 0xee));
+
+ /* pllmod */
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL7, 0x0002);
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL6, 0x3a04);
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL5, 0xfae3);
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL4, 0x1b72);
+}
+
+static int ltq_vrx200_pcie_phy_wait_for_pll(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+ unsigned int tmp;
+ int ret;
+
+ ret = regmap_read_poll_timeout(priv->phy_regmap, PCIE_PHY_PLL_STATUS,
+ tmp, ((tmp & 0x0070) == 0x0070), 10,
+ 10000);
+ if (ret) {
+ dev_err(priv->dev, "PLL Link timeout, PLL status = 0x%04x\n",
+ tmp);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ltq_vrx200_pcie_phy_apply_workarounds(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+ static const struct reg_default slices[] = {
+ {
+ .reg = PCIE_PHY_TX1_CTRL1,
+ .def = PCIE_PHY_TX1_CTRL1_LOAD_EN,
+ },
+ {
+ .reg = PCIE_PHY_TX2_CTRL1,
+ .def = PCIE_PHY_TX2_CTRL1_LOAD_EN,
+ },
+ {
+ .reg = PCIE_PHY_RX1_CTRL1,
+ .def = PCIE_PHY_RX1_CTRL1_LOAD_EN,
+ }
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(slices); i++) {
+ /* enable load_en */
+ regmap_update_bits(priv->phy_regmap, slices[i].reg,
+ slices[i].def, slices[i].def);
+
+ udelay(1);
+
+ /* disable load_en */
+ regmap_update_bits(priv->phy_regmap, slices[i].reg,
+ slices[i].def, 0x0);
+ }
+
+ for (i = 0; i < 5; i++) {
+ /* TX2 modulation */
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD1, 0x1ffe);
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD2, 0xfffe);
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD3, 0x0601);
+ usleep_range(1000, 2000);
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD3, 0x0001);
+
+ /* TX1 modulation */
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD1, 0x1ffe);
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD2, 0xfffe);
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD3, 0x0601);
+ usleep_range(1000, 2000);
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD3, 0x0001);
+ }
+}
+
+static int ltq_vrx200_pcie_phy_init(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ if (of_device_is_big_endian(priv->dev->of_node))
+ regmap_update_bits(priv->rcu_regmap,
+ priv->rcu_ahb_endian_offset,
+ priv->rcu_ahb_endian_big_endian_mask,
+ priv->rcu_ahb_endian_big_endian_mask);
+ else
+ regmap_update_bits(priv->rcu_regmap,
+ priv->rcu_ahb_endian_offset,
+ priv->rcu_ahb_endian_big_endian_mask, 0x0);
+
+ ret = reset_control_assert(priv->phy_reset);
+ if (ret)
+ goto err;
+
+ udelay(1);
+
+ ret = reset_control_deassert(priv->phy_reset);
+ if (ret)
+ goto err;
+
+ udelay(1);
+
+ ret = reset_control_deassert(priv->pcie_reset);
+ if (ret)
+ goto err_assert_phy_reset;
+
+ /* Make sure PHY PLL is stable */
+ usleep_range(20, 40);
+
+ return 0;
+
+err_assert_phy_reset:
+ reset_control_assert(priv->phy_reset);
+err:
+ return ret;
+}
+
+static int ltq_vrx200_pcie_phy_exit(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = reset_control_assert(priv->pcie_reset);
+ if (ret)
+ return ret;
+
+ ret = reset_control_assert(priv->phy_reset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ltq_vrx200_pcie_phy_power_on(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ /* Enable PDI to access PCIe PHY register */
+ ret = clk_prepare_enable(priv->pdi_clk);
+ if (ret)
+ goto err;
+
+ /* Configure PLL and PHY clock */
+ ltq_vrx200_pcie_phy_common_setup(phy);
+
+ pcie_phy_36mhz_mode_setup(phy);
+
+ /* Enable the PCIe PHY and make PLL setting take effect */
+ ret = clk_prepare_enable(priv->phy_clk);
+ if (ret)
+ goto err_disable_pdi_clk;
+
+ /* Check if we are in "startup ready" status */
+ if (ltq_vrx200_pcie_phy_wait_for_pll(phy) != 0)
+ goto err_disable_phy_clk;
+
+ ltq_vrx200_pcie_phy_apply_workarounds(phy);
+
+ return 0;
+
+err_disable_phy_clk:
+ clk_disable_unprepare(priv->phy_clk);
+err_disable_pdi_clk:
+ clk_disable_unprepare(priv->pdi_clk);
+err:
+ return ret;
+}
+
+static int ltq_vrx200_pcie_phy_power_off(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(priv->phy_clk);
+ clk_disable_unprepare(priv->pdi_clk);
+
+ return 0;
+}
+
+static struct phy_ops ltq_vrx200_pcie_phy_ops = {
+ .init = ltq_vrx200_pcie_phy_init,
+ .exit = ltq_vrx200_pcie_phy_exit,
+ .power_on = ltq_vrx200_pcie_phy_power_on,
+ .power_off = ltq_vrx200_pcie_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *ltq_vrx200_pcie_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = dev_get_drvdata(dev);
+ unsigned int mode;
+
+ if (args->args_count != 1) {
+ dev_err(dev, "invalid number of arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mode = args->args[0];
+
+ switch (mode) {
+ case LANTIQ_PCIE_PHY_MODE_36MHZ:
+ priv->mode = mode;
+ break;
+
+ case LANTIQ_PCIE_PHY_MODE_25MHZ:
+ case LANTIQ_PCIE_PHY_MODE_25MHZ_SSC:
+ case LANTIQ_PCIE_PHY_MODE_36MHZ_SSC:
+ case LANTIQ_PCIE_PHY_MODE_100MHZ:
+ case LANTIQ_PCIE_PHY_MODE_100MHZ_SSC:
+ dev_err(dev, "PHY mode not implemented yet: %u\n", mode);
+ return ERR_PTR(-EINVAL);
+
+ default:
+ dev_err(dev, "invalid PHY mode %u\n", mode);
+ return ERR_PTR(-EINVAL);
+	}
+
+ return priv->phy;
+}
+
+static int ltq_vrx200_pcie_phy_probe(struct platform_device *pdev)
+{
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .reg_stride = 2,
+ .max_register = PCIE_PHY_RX1_A_CTRL,
+ };
+ struct ltq_vrx200_pcie_phy_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *provider;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->phy_regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(priv->phy_regmap))
+ return PTR_ERR(priv->phy_regmap);
+
+ priv->rcu_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "lantiq,rcu");
+ if (IS_ERR(priv->rcu_regmap))
+ return PTR_ERR(priv->rcu_regmap);
+
+ ret = device_property_read_u32(dev, "lantiq,rcu-endian-offset",
+ &priv->rcu_ahb_endian_offset);
+ if (ret) {
+ dev_err(dev,
+ "failed to parse the 'lantiq,rcu-endian-offset' property\n");
+ return ret;
+ }
+
+ ret = device_property_read_u32(dev, "lantiq,rcu-big-endian-mask",
+ &priv->rcu_ahb_endian_big_endian_mask);
+ if (ret) {
+ dev_err(dev,
+ "failed to parse the 'lantiq,rcu-big-endian-mask' property\n");
+ return ret;
+ }
+
+ priv->pdi_clk = devm_clk_get(dev, "pdi");
+ if (IS_ERR(priv->pdi_clk))
+ return PTR_ERR(priv->pdi_clk);
+
+ priv->phy_clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(priv->phy_clk))
+ return PTR_ERR(priv->phy_clk);
+
+ priv->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ if (IS_ERR(priv->phy_reset))
+ return PTR_ERR(priv->phy_reset);
+
+ priv->pcie_reset = devm_reset_control_get_shared(dev, "pcie");
+ if (IS_ERR(priv->pcie_reset))
+ return PTR_ERR(priv->pcie_reset);
+
+ priv->dev = dev;
+
+ priv->phy = devm_phy_create(dev, dev->of_node,
+ &ltq_vrx200_pcie_phy_ops);
+ if (IS_ERR(priv->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(priv->phy);
+ }
+
+ phy_set_drvdata(priv->phy, priv);
+ dev_set_drvdata(dev, priv);
+
+ provider = devm_of_phy_provider_register(dev,
+ ltq_vrx200_pcie_phy_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static const struct of_device_id ltq_vrx200_pcie_phy_of_match[] = {
+ { .compatible = "lantiq,vrx200-pcie-phy", },
+ { .compatible = "lantiq,arx300-pcie-phy", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ltq_vrx200_pcie_phy_of_match);
+
+static struct platform_driver ltq_vrx200_pcie_phy_driver = {
+ .probe = ltq_vrx200_pcie_phy_probe,
+ .driver = {
+ .name = "ltq-vrx200-pcie-phy",
+ .of_match_table = ltq_vrx200_pcie_phy_of_match,
+ }
+};
+module_platform_driver(ltq_vrx200_pcie_phy_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Lantiq VRX200 and ARX300 PCIe PHY driver");
+MODULE_LICENSE("GPL v2");
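The PLL wait in the new driver leans on regmap_read_poll_timeout(), which re-reads a register until a caller-supplied condition holds, sleeping between reads, and gives up with -ETIMEDOUT. A minimal sketch of the same pattern (constants copied from the driver, helper name hypothetical):

#include <linux/regmap.h>

static int wait_pll_locked(struct regmap *map)
{
	unsigned int val;

	/* Poll PCIE_PHY_PLL_STATUS (0x58) every ~10us, for up to 10ms. */
	return regmap_read_poll_timeout(map, 0x58, val,
					(val & 0x0070) == 0x0070,
					10, 10000);
}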
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 0e1642419c0b..4053ba6cd0fb 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -57,6 +57,7 @@ config PHY_MVEBU_CP110_COMPHY
tristate "Marvell CP110 comphy driver"
depends on ARCH_MVEBU || COMPILE_TEST
depends on OF
+ depends on HAVE_ARM_SMCCC
select GENERIC_PHY
help
This driver allows controlling the comphy, a hardware block providing
diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c
index 3e00bc679d4e..6960dfd8ad8c 100644
--- a/drivers/phy/marvell/phy-armada38x-comphy.c
+++ b/drivers/phy/marvell/phy-armada38x-comphy.c
@@ -200,8 +200,10 @@ static int a38x_comphy_probe(struct platform_device *pdev)
}
phy = devm_phy_create(&pdev->dev, child, &a38x_comphy_ops);
- if (IS_ERR(phy))
+ if (IS_ERR(phy)) {
+ of_node_put(child);
return PTR_ERR(phy);
+ }
priv->lane[val].base = base + 0x28 * val;
priv->lane[val].priv = priv;
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index 8812a104c233..1a138be8bd6a 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -26,6 +26,7 @@
#define COMPHY_SIP_POWER_ON 0x82000001
#define COMPHY_SIP_POWER_OFF 0x82000002
#define COMPHY_SIP_PLL_LOCK 0x82000003
+#define COMPHY_FW_NOT_SUPPORTED (-1)
#define COMPHY_FW_MODE_SATA 0x1
#define COMPHY_FW_MODE_SGMII 0x2
@@ -169,6 +170,7 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy)
struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
u32 fw_param;
int fw_mode;
+ int ret;
fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, lane->port,
lane->mode, lane->submode);
@@ -217,7 +219,12 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy)
return -ENOTSUPP;
}
- return mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_ON, lane->id, fw_param);
+ ret = mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_ON, lane->id, fw_param);
+ if (ret == COMPHY_FW_NOT_SUPPORTED)
+ dev_err(lane->dev,
+ "unsupported SMC call, try updating your firmware\n");
+
+ return ret;
}
static int mvebu_a3700_comphy_power_off(struct phy *phy)
@@ -277,13 +284,17 @@ static int mvebu_a3700_comphy_probe(struct platform_device *pdev)
}
lane = devm_kzalloc(&pdev->dev, sizeof(*lane), GFP_KERNEL);
- if (!lane)
+ if (!lane) {
+ of_node_put(child);
return -ENOMEM;
+ }
phy = devm_phy_create(&pdev->dev, child,
&mvebu_a3700_comphy_ops);
- if (IS_ERR(phy))
+ if (IS_ERR(phy)) {
+ of_node_put(child);
return PTR_ERR(phy);
+ }
lane->dev = &pdev->dev;
lane->mode = PHY_MODE_INVALID;
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index d98e0451f6a1..e3b87c94aaf6 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -5,6 +5,8 @@
* Antoine Tenart <antoine.tenart@free-electrons.com>
*/
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
@@ -22,6 +24,7 @@
#define MVEBU_COMPHY_SERDES_CFG0_PU_RX BIT(11)
#define MVEBU_COMPHY_SERDES_CFG0_PU_TX BIT(12)
#define MVEBU_COMPHY_SERDES_CFG0_HALF_BUS BIT(14)
+#define MVEBU_COMPHY_SERDES_CFG0_RXAUI_MODE BIT(15)
#define MVEBU_COMPHY_SERDES_CFG1(n) (0x4 + (n) * 0x1000)
#define MVEBU_COMPHY_SERDES_CFG1_RESET BIT(3)
#define MVEBU_COMPHY_SERDES_CFG1_RX_INIT BIT(4)
@@ -77,8 +80,8 @@
#define MVEBU_COMPHY_TX_SLEW_RATE(n) (0x974 + (n) * 0x1000)
#define MVEBU_COMPHY_TX_SLEW_RATE_EMPH(n) ((n) << 5)
#define MVEBU_COMPHY_TX_SLEW_RATE_SLC(n) ((n) << 10)
-#define MVEBU_COMPHY_DLT_CTRL(n) (0x984 + (n) * 0x1000)
-#define MVEBU_COMPHY_DLT_CTRL_DTL_FLOOP_EN BIT(2)
+#define MVEBU_COMPHY_DTL_CTRL(n) (0x984 + (n) * 0x1000)
+#define MVEBU_COMPHY_DTL_CTRL_DTL_FLOOP_EN BIT(2)
#define MVEBU_COMPHY_FRAME_DETECT0(n) (0xa14 + (n) * 0x1000)
#define MVEBU_COMPHY_FRAME_DETECT0_PATN(n) ((n) << 7)
#define MVEBU_COMPHY_FRAME_DETECT3(n) (0xa20 + (n) * 0x1000)
@@ -111,55 +114,151 @@
#define MVEBU_COMPHY_SELECTOR_PHY(n) ((n) * 0x4)
#define MVEBU_COMPHY_PIPE_SELECTOR 0x1144
#define MVEBU_COMPHY_PIPE_SELECTOR_PIPE(n) ((n) * 0x4)
+#define MVEBU_COMPHY_SD1_CTRL1 0x1148
+#define MVEBU_COMPHY_SD1_CTRL1_RXAUI1_EN BIT(26)
+#define MVEBU_COMPHY_SD1_CTRL1_RXAUI0_EN BIT(27)
#define MVEBU_COMPHY_LANES 6
#define MVEBU_COMPHY_PORTS 3
+#define COMPHY_SIP_POWER_ON 0x82000001
+#define COMPHY_SIP_POWER_OFF 0x82000002
+#define COMPHY_FW_NOT_SUPPORTED (-1)
+
+/*
+ * A lane is described by the following bitfields:
+ * [ 1- 0]: COMPHY polarity inversion
+ * [ 2- 7]: COMPHY speed
+ * [ 8-11]: COMPHY port index
+ * [12-16]: COMPHY mode
+ * [17]: Clock source
+ * [18-20]: PCIe width (x1, x2, x4)
+ */
+#define COMPHY_FW_POL_OFFSET 0
+#define COMPHY_FW_POL_MASK GENMASK(1, 0)
+#define COMPHY_FW_SPEED_OFFSET 2
+#define COMPHY_FW_SPEED_MASK GENMASK(7, 2)
+#define COMPHY_FW_SPEED_MAX COMPHY_FW_SPEED_MASK
+#define COMPHY_FW_SPEED_1250 0
+#define COMPHY_FW_SPEED_3125 2
+#define COMPHY_FW_SPEED_5000 3
+#define COMPHY_FW_SPEED_103125 6
+#define COMPHY_FW_PORT_OFFSET 8
+#define COMPHY_FW_PORT_MASK GENMASK(11, 8)
+#define COMPHY_FW_MODE_OFFSET 12
+#define COMPHY_FW_MODE_MASK GENMASK(16, 12)
+#define COMPHY_FW_WIDTH_OFFSET 18
+#define COMPHY_FW_WIDTH_MASK GENMASK(20, 18)
+
+#define COMPHY_FW_PARAM_FULL(mode, port, speed, pol, width) \
+ ((((pol) << COMPHY_FW_POL_OFFSET) & COMPHY_FW_POL_MASK) | \
+ (((mode) << COMPHY_FW_MODE_OFFSET) & COMPHY_FW_MODE_MASK) | \
+ (((port) << COMPHY_FW_PORT_OFFSET) & COMPHY_FW_PORT_MASK) | \
+ (((speed) << COMPHY_FW_SPEED_OFFSET) & COMPHY_FW_SPEED_MASK) | \
+ (((width) << COMPHY_FW_WIDTH_OFFSET) & COMPHY_FW_WIDTH_MASK))
+
+#define COMPHY_FW_PARAM(mode, port) \
+ COMPHY_FW_PARAM_FULL(mode, port, COMPHY_FW_SPEED_MAX, 0, 0)
+
+#define COMPHY_FW_PARAM_ETH(mode, port, speed) \
+ COMPHY_FW_PARAM_FULL(mode, port, speed, 0, 0)
+
+#define COMPHY_FW_PARAM_PCIE(mode, port, width) \
+ COMPHY_FW_PARAM_FULL(mode, port, COMPHY_FW_SPEED_5000, 0, width)
+
+#define COMPHY_FW_MODE_SATA 0x1
+#define COMPHY_FW_MODE_SGMII 0x2 /* SGMII 1G */
+#define COMPHY_FW_MODE_HS_SGMII 0x3 /* SGMII 2.5G */
+#define COMPHY_FW_MODE_USB3H 0x4
+#define COMPHY_FW_MODE_USB3D 0x5
+#define COMPHY_FW_MODE_PCIE 0x6
+#define COMPHY_FW_MODE_RXAUI 0x7
+#define COMPHY_FW_MODE_XFI 0x8 /* SFI: 0x9 (is treated like XFI) */
+
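+/*
+ * Worked example (illustrative only): a 10G-KR lane on port 0 packs to
+ * COMPHY_FW_PARAM_ETH(COMPHY_FW_MODE_XFI, 0, COMPHY_FW_SPEED_103125)
+ *	= (0x8 << 12) | (0 << 8) | (6 << 2) = 0x8018.
+ */
+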
struct mvebu_comphy_conf {
enum phy_mode mode;
int submode;
unsigned lane;
unsigned port;
u32 mux;
+ u32 fw_mode;
};
-#define MVEBU_COMPHY_CONF(_lane, _port, _submode, _mux) \
+#define ETH_CONF(_lane, _port, _submode, _mux, _fw) \
{ \
.lane = _lane, \
.port = _port, \
.mode = PHY_MODE_ETHERNET, \
.submode = _submode, \
.mux = _mux, \
+ .fw_mode = _fw, \
+ }
+
+#define GEN_CONF(_lane, _port, _mode, _fw) \
+ { \
+ .lane = _lane, \
+ .port = _port, \
+ .mode = _mode, \
+ .submode = PHY_INTERFACE_MODE_NA, \
+ .mux = -1, \
+ .fw_mode = _fw, \
}
static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
/* lane 0 */
- MVEBU_COMPHY_CONF(0, 1, PHY_INTERFACE_MODE_SGMII, 0x1),
- MVEBU_COMPHY_CONF(0, 1, PHY_INTERFACE_MODE_2500BASEX, 0x1),
+ GEN_CONF(0, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
+ ETH_CONF(0, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+ ETH_CONF(0, 1, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
+ GEN_CONF(0, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
/* lane 1 */
- MVEBU_COMPHY_CONF(1, 2, PHY_INTERFACE_MODE_SGMII, 0x1),
- MVEBU_COMPHY_CONF(1, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1),
+ GEN_CONF(1, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
+ GEN_CONF(1, 0, PHY_MODE_USB_DEVICE_SS, COMPHY_FW_MODE_USB3D),
+ GEN_CONF(1, 0, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
+ GEN_CONF(1, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
+ ETH_CONF(1, 2, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+ ETH_CONF(1, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
/* lane 2 */
- MVEBU_COMPHY_CONF(2, 0, PHY_INTERFACE_MODE_SGMII, 0x1),
- MVEBU_COMPHY_CONF(2, 0, PHY_INTERFACE_MODE_2500BASEX, 0x1),
- MVEBU_COMPHY_CONF(2, 0, PHY_INTERFACE_MODE_10GKR, 0x1),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_10GKR, 0x1, COMPHY_FW_MODE_XFI),
+ GEN_CONF(2, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
+ GEN_CONF(2, 0, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
+ GEN_CONF(2, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
/* lane 3 */
- MVEBU_COMPHY_CONF(3, 1, PHY_INTERFACE_MODE_SGMII, 0x2),
- MVEBU_COMPHY_CONF(3, 1, PHY_INTERFACE_MODE_2500BASEX, 0x2),
+ GEN_CONF(3, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
+ ETH_CONF(3, 1, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII),
+ ETH_CONF(3, 1, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_HS_SGMII),
+ ETH_CONF(3, 1, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI),
+ GEN_CONF(3, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
+ GEN_CONF(3, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
/* lane 4 */
- MVEBU_COMPHY_CONF(4, 0, PHY_INTERFACE_MODE_SGMII, 0x2),
- MVEBU_COMPHY_CONF(4, 0, PHY_INTERFACE_MODE_2500BASEX, 0x2),
- MVEBU_COMPHY_CONF(4, 0, PHY_INTERFACE_MODE_10GKR, 0x2),
- MVEBU_COMPHY_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_HS_SGMII),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_10GKR, 0x2, COMPHY_FW_MODE_XFI),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
+ GEN_CONF(4, 0, PHY_MODE_USB_DEVICE_SS, COMPHY_FW_MODE_USB3D),
+ GEN_CONF(4, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
+ GEN_CONF(4, 1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_HS_SGMII),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GKR, -1, COMPHY_FW_MODE_XFI),
/* lane 5 */
- MVEBU_COMPHY_CONF(5, 2, PHY_INTERFACE_MODE_SGMII, 0x1),
- MVEBU_COMPHY_CONF(5, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1),
+ ETH_CONF(5, 1, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
+ GEN_CONF(5, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
+ ETH_CONF(5, 2, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+ ETH_CONF(5, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
+ GEN_CONF(5, 2, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
};
struct mvebu_comphy_priv {
void __iomem *base;
struct regmap *regmap;
struct device *dev;
+ struct clk *mg_domain_clk;
+ struct clk *mg_core_clk;
+ struct clk *axi_clk;
+ unsigned long cp_phys;
};
struct mvebu_comphy_lane {
@@ -170,30 +269,59 @@ struct mvebu_comphy_lane {
int port;
};
-static int mvebu_comphy_get_mux(int lane, int port,
- enum phy_mode mode, int submode)
+static int mvebu_comphy_smc(unsigned long function, unsigned long phys,
+ unsigned long lane, unsigned long mode)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(function, phys, lane, mode, 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+static int mvebu_comphy_get_mode(bool fw_mode, int lane, int port,
+ enum phy_mode mode, int submode)
{
int i, n = ARRAY_SIZE(mvebu_comphy_cp110_modes);
+ /* Ignore PCIe submode: it represents the width */
+ bool ignore_submode = (mode == PHY_MODE_PCIE);
+ const struct mvebu_comphy_conf *conf;
/* Unused PHY mux value is 0x0 */
if (mode == PHY_MODE_INVALID)
return 0;
for (i = 0; i < n; i++) {
- if (mvebu_comphy_cp110_modes[i].lane == lane &&
- mvebu_comphy_cp110_modes[i].port == port &&
- mvebu_comphy_cp110_modes[i].mode == mode &&
- mvebu_comphy_cp110_modes[i].submode == submode)
+ conf = &mvebu_comphy_cp110_modes[i];
+ if (conf->lane == lane &&
+ conf->port == port &&
+ conf->mode == mode &&
+ (conf->submode == submode || ignore_submode))
break;
}
if (i == n)
return -EINVAL;
- return mvebu_comphy_cp110_modes[i].mux;
+ if (fw_mode)
+ return conf->fw_mode;
+ else
+ return conf->mux;
}
-static void mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
+static inline int mvebu_comphy_get_mux(int lane, int port,
+ enum phy_mode mode, int submode)
+{
+ return mvebu_comphy_get_mode(false, lane, port, mode, submode);
+}
+
+static inline int mvebu_comphy_get_fw_mode(int lane, int port,
+ enum phy_mode mode, int submode)
+{
+ return mvebu_comphy_get_mode(true, lane, port, mode, submode);
+}
+
+static int mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
{
struct mvebu_comphy_priv *priv = lane->priv;
u32 val;
@@ -210,20 +338,61 @@ static void mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
MVEBU_COMPHY_SERDES_CFG0_PU_TX |
MVEBU_COMPHY_SERDES_CFG0_HALF_BUS |
MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xf) |
- MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xf));
- if (lane->submode == PHY_INTERFACE_MODE_10GKR)
+ MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xf) |
+ MVEBU_COMPHY_SERDES_CFG0_RXAUI_MODE);
+
+ switch (lane->submode) {
+ case PHY_INTERFACE_MODE_10GKR:
val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xe) |
MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xe);
- else if (lane->submode == PHY_INTERFACE_MODE_2500BASEX)
+ break;
+ case PHY_INTERFACE_MODE_RXAUI:
+ val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xb) |
+ MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xb) |
+ MVEBU_COMPHY_SERDES_CFG0_RXAUI_MODE;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0x8) |
MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0x8) |
MVEBU_COMPHY_SERDES_CFG0_HALF_BUS;
- else if (lane->submode == PHY_INTERFACE_MODE_SGMII)
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0x6) |
MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0x6) |
MVEBU_COMPHY_SERDES_CFG0_HALF_BUS;
+ break;
+ default:
+ dev_err(priv->dev,
+ "unsupported comphy submode (%d) on lane %d\n",
+ lane->submode,
+ lane->id);
+ return -ENOTSUPP;
+ }
+
writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG0(lane->id));
+ if (lane->submode == PHY_INTERFACE_MODE_RXAUI) {
+ regmap_read(priv->regmap, MVEBU_COMPHY_SD1_CTRL1, &val);
+
+ switch (lane->id) {
+ case 2:
+ case 3:
+ val |= MVEBU_COMPHY_SD1_CTRL1_RXAUI0_EN;
+ break;
+ case 4:
+ case 5:
+ val |= MVEBU_COMPHY_SD1_CTRL1_RXAUI1_EN;
+ break;
+ default:
+ dev_err(priv->dev,
+ "RXAUI is not supported on comphy lane %d\n",
+ lane->id);
+ return -EINVAL;
+ }
+
+ regmap_write(priv->regmap, MVEBU_COMPHY_SD1_CTRL1, val);
+ }
+
/* reset */
val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id));
val &= ~(MVEBU_COMPHY_SERDES_CFG1_RESET |
@@ -264,6 +433,8 @@ static void mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
val &= ~MVEBU_COMPHY_LOOPBACK_DBUS_WIDTH(0x7);
val |= MVEBU_COMPHY_LOOPBACK_DBUS_WIDTH(0x1);
writel(val, priv->base + MVEBU_COMPHY_LOOPBACK(lane->id));
+
+ return 0;
}
static int mvebu_comphy_init_plls(struct mvebu_comphy_lane *lane)
@@ -312,17 +483,20 @@ static int mvebu_comphy_set_mode_sgmii(struct phy *phy)
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
u32 val;
+ int err;
- mvebu_comphy_ethernet_init_reset(lane);
+ err = mvebu_comphy_ethernet_init_reset(lane);
+ if (err)
+ return err;
val = readl(priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
val &= ~MVEBU_COMPHY_RX_CTRL1_CLK8T_EN;
val |= MVEBU_COMPHY_RX_CTRL1_RXCLK2X_SEL;
writel(val, priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
- val = readl(priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id));
- val &= ~MVEBU_COMPHY_DLT_CTRL_DTL_FLOOP_EN;
- writel(val, priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id));
+ val = readl(priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
+ val &= ~MVEBU_COMPHY_DTL_CTRL_DTL_FLOOP_EN;
+ writel(val, priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
regmap_read(priv->regmap, MVEBU_COMPHY_CONF1(lane->id), &val);
val &= ~MVEBU_COMPHY_CONF1_USB_PCIE;
@@ -337,22 +511,78 @@ static int mvebu_comphy_set_mode_sgmii(struct phy *phy)
return mvebu_comphy_init_plls(lane);
}
+static int mvebu_comphy_set_mode_rxaui(struct phy *phy)
+{
+ struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
+ struct mvebu_comphy_priv *priv = lane->priv;
+ u32 val;
+ int err;
+
+ err = mvebu_comphy_ethernet_init_reset(lane);
+ if (err)
+ return err;
+
+ val = readl(priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
+ val |= MVEBU_COMPHY_RX_CTRL1_RXCLK2X_SEL |
+ MVEBU_COMPHY_RX_CTRL1_CLK8T_EN;
+ writel(val, priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
+
+ val = readl(priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
+ val |= MVEBU_COMPHY_DTL_CTRL_DTL_FLOOP_EN;
+ writel(val, priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
+
+ val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG2(lane->id));
+ val |= MVEBU_COMPHY_SERDES_CFG2_DFE_EN;
+ writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG2(lane->id));
+
+ val = readl(priv->base + MVEBU_COMPHY_DFE_RES(lane->id));
+ val |= MVEBU_COMPHY_DFE_RES_FORCE_GEN_TBL;
+ writel(val, priv->base + MVEBU_COMPHY_DFE_RES(lane->id));
+
+ val = readl(priv->base + MVEBU_COMPHY_GEN1_S0(lane->id));
+ val &= ~MVEBU_COMPHY_GEN1_S0_TX_EMPH(0xf);
+ val |= MVEBU_COMPHY_GEN1_S0_TX_EMPH(0xd);
+ writel(val, priv->base + MVEBU_COMPHY_GEN1_S0(lane->id));
+
+ val = readl(priv->base + MVEBU_COMPHY_GEN1_S1(lane->id));
+ val &= ~(MVEBU_COMPHY_GEN1_S1_RX_MUL_PI(0x7) |
+ MVEBU_COMPHY_GEN1_S1_RX_MUL_PF(0x7));
+ val |= MVEBU_COMPHY_GEN1_S1_RX_MUL_PI(0x1) |
+ MVEBU_COMPHY_GEN1_S1_RX_MUL_PF(0x1) |
+ MVEBU_COMPHY_GEN1_S1_RX_DFE_EN;
+ writel(val, priv->base + MVEBU_COMPHY_GEN1_S1(lane->id));
+
+ val = readl(priv->base + MVEBU_COMPHY_COEF(lane->id));
+ val &= ~(MVEBU_COMPHY_COEF_DFE_EN | MVEBU_COMPHY_COEF_DFE_CTRL);
+ writel(val, priv->base + MVEBU_COMPHY_COEF(lane->id));
+
+ val = readl(priv->base + MVEBU_COMPHY_GEN1_S4(lane->id));
+ val &= ~MVEBU_COMPHY_GEN1_S4_DFE_RES(0x3);
+ val |= MVEBU_COMPHY_GEN1_S4_DFE_RES(0x1);
+ writel(val, priv->base + MVEBU_COMPHY_GEN1_S4(lane->id));
+
+ return mvebu_comphy_init_plls(lane);
+}
+
static int mvebu_comphy_set_mode_10gkr(struct phy *phy)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
u32 val;
+ int err;
- mvebu_comphy_ethernet_init_reset(lane);
+ err = mvebu_comphy_ethernet_init_reset(lane);
+ if (err)
+ return err;
val = readl(priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
val |= MVEBU_COMPHY_RX_CTRL1_RXCLK2X_SEL |
MVEBU_COMPHY_RX_CTRL1_CLK8T_EN;
writel(val, priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
- val = readl(priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id));
- val |= MVEBU_COMPHY_DLT_CTRL_DTL_FLOOP_EN;
- writel(val, priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id));
+ val = readl(priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
+ val |= MVEBU_COMPHY_DTL_CTRL_DTL_FLOOP_EN;
+ writel(val, priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
/* Speed divider */
val = readl(priv->base + MVEBU_COMPHY_SPEED_DIV(lane->id));
@@ -476,7 +706,7 @@ static int mvebu_comphy_set_mode_10gkr(struct phy *phy)
return mvebu_comphy_init_plls(lane);
}
-static int mvebu_comphy_power_on(struct phy *phy)
+static int mvebu_comphy_power_on_legacy(struct phy *phy)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
@@ -502,6 +732,9 @@ static int mvebu_comphy_power_on(struct phy *phy)
case PHY_INTERFACE_MODE_2500BASEX:
ret = mvebu_comphy_set_mode_sgmii(phy);
break;
+ case PHY_INTERFACE_MODE_RXAUI:
+ ret = mvebu_comphy_set_mode_rxaui(phy);
+ break;
case PHY_INTERFACE_MODE_10GKR:
ret = mvebu_comphy_set_mode_10gkr(phy);
break;
@@ -517,26 +750,110 @@ static int mvebu_comphy_power_on(struct phy *phy)
return ret;
}
+static int mvebu_comphy_power_on(struct phy *phy)
+{
+ struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
+ struct mvebu_comphy_priv *priv = lane->priv;
+ int fw_mode, fw_speed;
+ u32 fw_param = 0;
+ int ret;
+
+ fw_mode = mvebu_comphy_get_fw_mode(lane->id, lane->port,
+ lane->mode, lane->submode);
+ if (fw_mode < 0)
+ goto try_legacy;
+
+ /* Try SMC flow first */
+ switch (lane->mode) {
+ case PHY_MODE_ETHERNET:
+ switch (lane->submode) {
+ case PHY_INTERFACE_MODE_RXAUI:
+ dev_dbg(priv->dev, "set lane %d to RXAUI mode\n",
+ lane->id);
+ fw_speed = 0;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ dev_dbg(priv->dev, "set lane %d to 1000BASE-X mode\n",
+ lane->id);
+ fw_speed = COMPHY_FW_SPEED_1250;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ dev_dbg(priv->dev, "set lane %d to 2500BASE-X mode\n",
+ lane->id);
+ fw_speed = COMPHY_FW_SPEED_3125;
+ break;
+ case PHY_INTERFACE_MODE_10GKR:
+ dev_dbg(priv->dev, "set lane %d to 10G-KR mode\n",
+ lane->id);
+ fw_speed = COMPHY_FW_SPEED_103125;
+ break;
+ default:
+ dev_err(priv->dev, "unsupported Ethernet mode (%d)\n",
+ lane->submode);
+ return -ENOTSUPP;
+ }
+ fw_param = COMPHY_FW_PARAM_ETH(fw_mode, lane->port, fw_speed);
+ break;
+ case PHY_MODE_USB_HOST_SS:
+ case PHY_MODE_USB_DEVICE_SS:
+ dev_dbg(priv->dev, "set lane %d to USB3 mode\n", lane->id);
+ fw_param = COMPHY_FW_PARAM(fw_mode, lane->port);
+ break;
+ case PHY_MODE_SATA:
+ dev_dbg(priv->dev, "set lane %d to SATA mode\n", lane->id);
+ fw_param = COMPHY_FW_PARAM(fw_mode, lane->port);
+ break;
+ case PHY_MODE_PCIE:
+ dev_dbg(priv->dev, "set lane %d to PCIe mode (x%d)\n", lane->id,
+ lane->submode);
+ fw_param = COMPHY_FW_PARAM_PCIE(fw_mode, lane->port,
+ lane->submode);
+ break;
+ default:
+ dev_err(priv->dev, "unsupported PHY mode (%d)\n", lane->mode);
+ return -ENOTSUPP;
+ }
+
+ ret = mvebu_comphy_smc(COMPHY_SIP_POWER_ON, priv->cp_phys, lane->id,
+ fw_param);
+ if (!ret)
+ return ret;
+
+ if (ret == COMPHY_FW_NOT_SUPPORTED)
+ dev_err(priv->dev,
+ "unsupported SMC call, try updating your firmware\n");
+
+ dev_warn(priv->dev,
+ "Firmware could not configure PHY %d with mode %d (ret: %d), trying legacy method\n",
+ lane->id, lane->mode, ret);
+
+try_legacy:
+ /* Fallback to Linux's implementation */
+ return mvebu_comphy_power_on_legacy(phy);
+}
+
static int mvebu_comphy_set_mode(struct phy *phy,
enum phy_mode mode, int submode)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
- if (mode != PHY_MODE_ETHERNET)
- return -EINVAL;
-
if (submode == PHY_INTERFACE_MODE_1000BASEX)
submode = PHY_INTERFACE_MODE_SGMII;
- if (mvebu_comphy_get_mux(lane->id, lane->port, mode, submode) < 0)
+ if (mvebu_comphy_get_fw_mode(lane->id, lane->port, mode, submode) < 0)
return -EINVAL;
lane->mode = mode;
lane->submode = submode;
+
+ /* PCIe submode represents the width */
+ if (mode == PHY_MODE_PCIE && !lane->submode)
+ lane->submode = 1;
+
return 0;
}
-static int mvebu_comphy_power_off(struct phy *phy)
+static int mvebu_comphy_power_off_legacy(struct phy *phy)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
@@ -559,6 +876,21 @@ static int mvebu_comphy_power_off(struct phy *phy)
return 0;
}
+static int mvebu_comphy_power_off(struct phy *phy)
+{
+ struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
+ struct mvebu_comphy_priv *priv = lane->priv;
+ int ret;
+
+ ret = mvebu_comphy_smc(COMPHY_SIP_POWER_OFF, priv->cp_phys,
+ lane->id, 0);
+ if (!ret)
+ return ret;
+
+ /* Fallback to Linux's implementation */
+ return mvebu_comphy_power_off_legacy(phy);
+}
+
static const struct phy_ops mvebu_comphy_ops = {
.power_on = mvebu_comphy_power_on,
.power_off = mvebu_comphy_power_off,
@@ -585,12 +917,72 @@ static struct phy *mvebu_comphy_xlate(struct device *dev,
return phy;
}
+static int mvebu_comphy_init_clks(struct mvebu_comphy_priv *priv)
+{
+ int ret;
+
+ priv->mg_domain_clk = devm_clk_get(priv->dev, "mg_clk");
+ if (IS_ERR(priv->mg_domain_clk))
+ return PTR_ERR(priv->mg_domain_clk);
+
+ ret = clk_prepare_enable(priv->mg_domain_clk);
+ if (ret < 0)
+ return ret;
+
+ priv->mg_core_clk = devm_clk_get(priv->dev, "mg_core_clk");
+ if (IS_ERR(priv->mg_core_clk)) {
+ ret = PTR_ERR(priv->mg_core_clk);
+ goto dis_mg_domain_clk;
+ }
+
+ ret = clk_prepare_enable(priv->mg_core_clk);
+ if (ret < 0)
+ goto dis_mg_domain_clk;
+
+ priv->axi_clk = devm_clk_get(priv->dev, "axi_clk");
+ if (IS_ERR(priv->axi_clk)) {
+ ret = PTR_ERR(priv->axi_clk);
+ goto dis_mg_core_clk;
+ }
+
+ ret = clk_prepare_enable(priv->axi_clk);
+ if (ret < 0)
+ goto dis_mg_core_clk;
+
+ return 0;
+
+dis_mg_core_clk:
+ clk_disable_unprepare(priv->mg_core_clk);
+
+dis_mg_domain_clk:
+ clk_disable_unprepare(priv->mg_domain_clk);
+
+ priv->mg_domain_clk = NULL;
+ priv->mg_core_clk = NULL;
+ priv->axi_clk = NULL;
+
+ return ret;
+}
+
+static void mvebu_comphy_disable_unprepare_clks(struct mvebu_comphy_priv *priv)
+{
+ if (priv->axi_clk)
+ clk_disable_unprepare(priv->axi_clk);
+
+ if (priv->mg_core_clk)
+ clk_disable_unprepare(priv->mg_core_clk);
+
+ if (priv->mg_domain_clk)
+ clk_disable_unprepare(priv->mg_domain_clk);
+}
+
static int mvebu_comphy_probe(struct platform_device *pdev)
{
struct mvebu_comphy_priv *priv;
struct phy_provider *provider;
struct device_node *child;
struct resource *res;
+ int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -607,10 +999,26 @@ static int mvebu_comphy_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ /*
+	 * Ignore errors if the clocks have not been initialized properly, for
+	 * compatibility with older device trees.
+ */
+ ret = mvebu_comphy_init_clks(priv);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_warn(&pdev->dev, "cannot initialize clocks\n");
+ }
+
+ /*
+ * Hack to retrieve a physical offset relative to this CP that will be
+ * given to the firmware
+ */
+ priv->cp_phys = res->start;
+
for_each_available_child_of_node(pdev->dev.of_node, child) {
struct mvebu_comphy_lane *lane;
struct phy *phy;
- int ret;
u32 val;
ret = of_property_read_u32(child, "reg", &val);
@@ -626,30 +1034,45 @@ static int mvebu_comphy_probe(struct platform_device *pdev)
}
lane = devm_kzalloc(&pdev->dev, sizeof(*lane), GFP_KERNEL);
- if (!lane)
- return -ENOMEM;
+ if (!lane) {
+ of_node_put(child);
+ ret = -ENOMEM;
+ goto disable_clks;
+ }
phy = devm_phy_create(&pdev->dev, child, &mvebu_comphy_ops);
- if (IS_ERR(phy))
- return PTR_ERR(phy);
+ if (IS_ERR(phy)) {
+ of_node_put(child);
+ ret = PTR_ERR(phy);
+ goto disable_clks;
+ }
lane->priv = priv;
lane->mode = PHY_MODE_INVALID;
+ lane->submode = PHY_INTERFACE_MODE_NA;
lane->id = val;
lane->port = -1;
phy_set_drvdata(phy, lane);
/*
- * Once all modes are supported in this driver we should call
+ * All modes are supported in this driver so we could call
* mvebu_comphy_power_off(phy) here to avoid relying on the
- * bootloader/firmware configuration.
+ * bootloader/firmware configuration, but for compatibility
+ * reasons we cannot de-configure the COMPHY without being sure
+ * that the firmware is up-to-date and fully-featured.
*/
}
dev_set_drvdata(&pdev->dev, priv);
provider = devm_of_phy_provider_register(&pdev->dev,
mvebu_comphy_xlate);
+
return PTR_ERR_OR_ZERO(provider);
+
+disable_clks:
+ mvebu_comphy_disable_unprepare_clks(priv);
+
+ return ret;
}
static const struct of_device_id mvebu_comphy_of_match_table[] = {
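A pattern worth noting: several hunks in this series (armada38x, a3700, cp110, slimbus, qcom-qmp) add the same fix, because for_each_available_child_of_node() holds a reference on the child node it hands to the loop body and only drops it when advancing to the next iteration. Breaking out of the loop early therefore leaks the reference unless the caller drops it. A minimal sketch of the corrected shape (setup_child() is hypothetical):

static int probe_children(struct device_node *parent)
{
	struct device_node *child;
	int ret;

	for_each_available_child_of_node(parent, child) {
		ret = setup_child(child);	/* hypothetical per-child work */
		if (ret) {
			/* early exit: drop the ref the iterator still holds */
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}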
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index e3880c4a15f2..b04f4fe85ac2 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -394,6 +394,16 @@ int phy_reset(struct phy *phy)
}
EXPORT_SYMBOL_GPL(phy_reset);
+/**
+ * phy_calibrate() - Tunes the phy hw parameters for current configuration
+ * @phy: the phy returned by phy_get()
+ *
+ * Used to calibrate phy hardware, typically by adjusting some parameters at
+ * runtime, which are otherwise lost after host controller reset and cannot
+ * be applied in phy_init() or phy_power_on().
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
int phy_calibrate(struct phy *phy)
{
int ret;
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 34ff6434da8f..39e8deb8001e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -35,7 +35,7 @@
#define PLL_READY_GATE_EN BIT(3)
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
-/* QPHY_COM_PCS_READY_STATUS bit */
+/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
#define PCS_READY BIT(0)
/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
@@ -115,6 +115,7 @@ enum qphy_reg_layout {
QPHY_SW_RESET,
QPHY_START_CTRL,
QPHY_PCS_READY_STATUS,
+ QPHY_PCS_STATUS,
QPHY_PCS_AUTONOMOUS_MODE_CTRL,
QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
@@ -133,7 +134,7 @@ static const unsigned int pciephy_regs_layout[] = {
[QPHY_FLL_MAN_CODE] = 0xd4,
[QPHY_SW_RESET] = 0x00,
[QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_READY_STATUS] = 0x174,
+ [QPHY_PCS_STATUS] = 0x174,
};
static const unsigned int usb3phy_regs_layout[] = {
@@ -144,7 +145,7 @@ static const unsigned int usb3phy_regs_layout[] = {
[QPHY_FLL_MAN_CODE] = 0xd0,
[QPHY_SW_RESET] = 0x00,
[QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_READY_STATUS] = 0x17c,
+ [QPHY_PCS_STATUS] = 0x17c,
[QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d4,
[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8,
[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
@@ -153,7 +154,7 @@ static const unsigned int usb3phy_regs_layout[] = {
static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
[QPHY_SW_RESET] = 0x00,
[QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_READY_STATUS] = 0x174,
+ [QPHY_PCS_STATUS] = 0x174,
[QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8,
[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc,
[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
@@ -911,7 +912,6 @@ struct qmp_phy_cfg {
unsigned int start_ctrl;
unsigned int pwrdn_ctrl;
- unsigned int mask_pcs_ready;
unsigned int mask_com_pcs_ready;
/* true, if PHY has a separate PHY_COM control block */
@@ -1074,7 +1074,6 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
.start_ctrl = PCS_START | PLL_READY_GATE_EN,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .mask_pcs_ready = PHYSTATUS,
.mask_com_pcs_ready = PCS_READY,
.has_phy_com_ctrl = true,
@@ -1106,7 +1105,6 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
- .mask_pcs_ready = PHYSTATUS,
};
/* list of resets */
@@ -1136,7 +1134,6 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .mask_pcs_ready = PHYSTATUS,
.has_phy_com_ctrl = false,
.has_lane_rst = false,
@@ -1167,7 +1164,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
- .mask_pcs_ready = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
@@ -1199,7 +1195,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
- .mask_pcs_ready = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
@@ -1226,7 +1221,6 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.start_ctrl = SERDES_START,
.pwrdn_ctrl = SW_PWRDN,
- .mask_pcs_ready = PCS_READY,
.is_dual_lane_phy = true,
.no_pcs_sw_reset = true,
@@ -1254,7 +1248,6 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .mask_pcs_ready = PHYSTATUS,
};
static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
@@ -1279,7 +1272,6 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
- .mask_pcs_ready = PHYSTATUS,
.is_dual_lane_phy = true,
};
@@ -1457,7 +1449,7 @@ static int qcom_qmp_phy_enable(struct phy *phy)
void __iomem *pcs = qphy->pcs;
void __iomem *dp_com = qmp->dp_com;
void __iomem *status;
- unsigned int mask, val;
+ unsigned int mask, val, ready;
int ret;
dev_vdbg(qmp->dev, "Initializing QMP phy\n");
@@ -1545,10 +1537,17 @@ static int qcom_qmp_phy_enable(struct phy *phy)
/* start SerDes and Phy-Coding-Sublayer */
qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
- status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
- mask = cfg->mask_pcs_ready;
+ if (cfg->type == PHY_TYPE_UFS) {
+ status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+ mask = PCS_READY;
+ ready = PCS_READY;
+ } else {
+ status = pcs + cfg->regs[QPHY_PCS_STATUS];
+ mask = PHYSTATUS;
+ ready = 0;
+ }
- ret = readl_poll_timeout(status, val, val & mask, 10,
+ ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
PHY_INIT_COMPLETE_TIMEOUT);
if (ret) {
dev_err(qmp->dev, "phy initialization timed-out\n");
@@ -2093,8 +2092,7 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
- pm_runtime_disable(dev);
- return ret;
+ goto err_node_put;
}
/*
@@ -2105,8 +2103,7 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
if (ret) {
dev_err(qmp->dev,
"failed to register pipe clock source\n");
- pm_runtime_disable(dev);
- return ret;
+ goto err_node_put;
}
id++;
}
@@ -2118,6 +2115,11 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
+
+err_node_put:
+ pm_runtime_disable(dev);
+ of_node_put(child);
+ return ret;
}
static struct platform_driver qcom_qmp_phy_driver = {
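The rework of the ready poll above exists because the two status registers signal readiness with opposite polarity: UFS PHYs report ready by setting PCS_READY, while the other PHY types report it by clearing PHYSTATUS. Comparing the masked value against an explicit expected pattern handles both; in sketch form, using names from the hunk:

	if (cfg->type == PHY_TYPE_UFS) {
		mask = PCS_READY;
		ready = PCS_READY;	/* ready when the bit is set */
	} else {
		mask = PHYSTATUS;
		ready = 0;		/* ready when the bit clears */
	}
	ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
				 PHY_INIT_COMPLETE_TIMEOUT);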
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index 8ffba67568ec..b7f6b1324395 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -61,6 +61,7 @@
USB2_OBINT_IDDIGCHG)
/* VBCTRL */
+#define USB2_VBCTRL_OCCLREN BIT(16)
#define USB2_VBCTRL_DRVVBUSSEL BIT(8)
/* LINECTRL1 */
@@ -374,6 +375,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
writel(val, usb2_base + USB2_LINECTRL1);
val = readl(usb2_base + USB2_VBCTRL);
+ val &= ~USB2_VBCTRL_OCCLREN;
writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
val = readl(usb2_base + USB2_ADPCTRL);
writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
index b10a84cab4a7..2b97fb1185a0 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
@@ -198,7 +198,7 @@
#define RK3328_BYPASS_TERM_RESISTOR_CALIB BIT(7)
#define RK3328_TERM_RESISTOR_CALIB_SPEED_14_8(x) UPDATE((x) >> 8, 6, 0)
/* REG:0xc6 */
-#define RK3328_TERM_RESISTOR_CALIB_SPEED_7_0(x) UPDATE(x, 7, 9)
+#define RK3328_TERM_RESISTOR_CALIB_SPEED_7_0(x) UPDATE(x, 7, 0)
/* REG:0xc7 */
#define RK3328_TERM_RESISTOR_50 UPDATE(0, 2, 1)
#define RK3328_TERM_RESISTOR_62_5 UPDATE(1, 2, 1)
diff --git a/drivers/phy/samsung/phy-exynos-dp-video.c b/drivers/phy/samsung/phy-exynos-dp-video.c
index aebd216dcf2f..6c607df1dc9a 100644
--- a/drivers/phy/samsung/phy-exynos-dp-video.c
+++ b/drivers/phy/samsung/phy-exynos-dp-video.c
@@ -109,6 +109,7 @@ static struct platform_driver exynos_dp_video_phy_driver = {
.driver = {
.name = "exynos-dp-video-phy",
.of_match_table = exynos_dp_video_phy_of_match,
+ .suppress_bind_attrs = true,
}
};
module_platform_driver(exynos_dp_video_phy_driver);
diff --git a/drivers/phy/samsung/phy-exynos-mipi-video.c b/drivers/phy/samsung/phy-exynos-mipi-video.c
index 3784bf100b95..bb51195f189f 100644
--- a/drivers/phy/samsung/phy-exynos-mipi-video.c
+++ b/drivers/phy/samsung/phy-exynos-mipi-video.c
@@ -359,6 +359,7 @@ static struct platform_driver exynos_mipi_video_phy_driver = {
.driver = {
.of_match_table = exynos_mipi_video_phy_of_match,
.name = "exynos-mipi-video-phy",
+ .suppress_bind_attrs = true,
}
};
module_platform_driver(exynos_mipi_video_phy_driver);
diff --git a/drivers/phy/samsung/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c
index 1b4ba8bdb43c..659e7ae0a6cf 100644
--- a/drivers/phy/samsung/phy-exynos-pcie.c
+++ b/drivers/phy/samsung/phy-exynos-pcie.c
@@ -272,6 +272,7 @@ static struct platform_driver exynos_pcie_phy_driver = {
.driver = {
.of_match_table = exynos_pcie_phy_match,
.name = "exynos_pcie_phy",
+ .suppress_bind_attrs = true,
}
};
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index 646259bee909..e510732afb8b 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -953,6 +953,7 @@ static struct platform_driver exynos5_usb3drd_phy = {
.driver = {
.of_match_table = exynos5_usbdrd_phy_of_match,
.name = "exynos5_usb3drd_phy",
+ .suppress_bind_attrs = true,
}
};
diff --git a/drivers/phy/samsung/phy-exynos5250-sata.c b/drivers/phy/samsung/phy-exynos5250-sata.c
index 9e5fc126032c..4dd7324d91b2 100644
--- a/drivers/phy/samsung/phy-exynos5250-sata.c
+++ b/drivers/phy/samsung/phy-exynos5250-sata.c
@@ -237,6 +237,7 @@ static struct platform_driver exynos_sata_phy_driver = {
.driver = {
.of_match_table = exynos_sata_phy_of_match,
.name = "samsung,sata-phy",
+ .suppress_bind_attrs = true,
}
};
module_platform_driver(exynos_sata_phy_driver);
diff --git a/drivers/phy/samsung/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c
index 6c82f4fbe8a2..090aa02e02de 100644
--- a/drivers/phy/samsung/phy-samsung-usb2.c
+++ b/drivers/phy/samsung/phy-samsung-usb2.c
@@ -250,6 +250,7 @@ static struct platform_driver samsung_usb2_phy_driver = {
.driver = {
.of_match_table = samsung_usb2_phy_of_match,
.name = "samsung-usb2-phy",
+ .suppress_bind_attrs = true,
}
};
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index f8edd0840fa2..f14f1f053a75 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -405,6 +405,7 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
const __be32 *addr;
unsigned int reg;
struct clk *clk;
+ int ret = 0;
mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -413,34 +414,40 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
init = &mux->clk_data;
regmap_node = of_parse_phandle(node, "ti,serdes-clk", 0);
- of_node_put(regmap_node);
if (!regmap_node) {
dev_err(dev, "Fail to get serdes-clk node\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_put_node;
}
regmap = syscon_node_to_regmap(regmap_node->parent);
if (IS_ERR(regmap)) {
dev_err(dev, "Fail to get Syscon regmap\n");
- return PTR_ERR(regmap);
+ ret = PTR_ERR(regmap);
+ goto out_put_node;
}
num_parents = of_clk_get_parent_count(node);
if (num_parents < 2) {
dev_err(dev, "SERDES clock must have parents\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_node;
}
parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents),
GFP_KERNEL);
- if (!parent_names)
- return -ENOMEM;
+ if (!parent_names) {
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
of_clk_parent_fill(node, parent_names, num_parents);
addr = of_get_address(regmap_node, 0, NULL, NULL);
- if (!addr)
- return -EINVAL;
+ if (!addr) {
+ ret = -EINVAL;
+ goto out_put_node;
+ }
reg = be32_to_cpu(*addr);
@@ -456,12 +463,16 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
mux->hw.init = init;
clk = devm_clk_register(dev, &mux->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto out_put_node;
+ }
am654_phy->clks[clock_num] = clk;
- return 0;
+out_put_node:
+ of_node_put(regmap_node);
+ return ret;
}
static const struct of_device_id serdes_am654_id_table[] = {
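The serdes_am654 change fixes an of_node_put() placed directly after of_parse_phandle(): the reference was dropped before regmap_node was ever used, so every later access ran on a node whose refcount the function no longer held. The fix routes all exits through one label that drops the reference last. In outline (use_node() is hypothetical):

	np = of_parse_phandle(node, "ti,serdes-clk", 0);	/* takes a ref */
	if (!np)
		return -ENODEV;

	ret = use_node(np);		/* hypothetical work while ref is held */

	of_node_put(np);		/* single drop point, after last use */
	return ret;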
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index f3585777324c..29fbab55c3b3 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1338,12 +1338,15 @@ static int of_qcom_slim_ngd_register(struct device *parent,
continue;
ngd = kzalloc(sizeof(*ngd), GFP_KERNEL);
- if (!ngd)
+ if (!ngd) {
+ of_node_put(node);
return -ENOMEM;
+ }
ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id);
if (!ngd->pdev) {
kfree(ngd);
+ of_node_put(node);
return -ENOMEM;
}
ngd->id = id;
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
index 9be41089edde..b2f013bfe42e 100644
--- a/drivers/slimbus/slimbus.h
+++ b/drivers/slimbus/slimbus.h
@@ -439,7 +439,7 @@ static inline bool slim_tid_txn(u8 mt, u8 mc)
(mc == SLIM_MSG_MC_REQUEST_INFORMATION ||
mc == SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION ||
mc == SLIM_MSG_MC_REQUEST_VALUE ||
- mc == SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION));
+ mc == SLIM_MSG_MC_REQUEST_CHANGE_VALUE));
}
static inline bool slim_ec_txn(u8 mt, u8 mc)
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 3f55cb3c81b2..001187c577bf 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
-thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
+thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 2427d73be731..2ec1af8f7968 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -930,6 +930,23 @@ struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
return res;
}
+static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
+ const struct tb_cfg_result *res)
+{
+ /*
+ * For unimplemented ports access to port config space may return
+ * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
+ * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
+ * that the caller can mark the port as disabled.
+ */
+ if (space == TB_CFG_PORT &&
+ res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
+ return -ENODEV;
+
+ tb_cfg_print_error(ctl, res);
+ return -EIO;
+}
+
int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length)
{
@@ -942,8 +959,7 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
case 1:
/* Thunderbolt error, tb_error holds the actual number */
- tb_cfg_print_error(ctl, &res);
- return -EIO;
+ return tb_cfg_get_error(ctl, space, &res);
case -ETIMEDOUT:
tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
@@ -969,8 +985,7 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
case 1:
/* Thunderbolt error, tb_error holds the actual number */
- tb_cfg_print_error(ctl, &res);
- return -EIO;
+ return tb_cfg_get_error(ctl, space, &res);
case -ETIMEDOUT:
tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 81e8ac4c5805..ee5196479854 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -414,7 +414,7 @@ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
struct device *dev = &sw->tb->nhi->pdev->dev;
int len, res;
- len = device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0);
+ len = device_property_count_u8(dev, "ThunderboltDROM");
if (len < 0 || len < sizeof(struct tb_drom_header))
return -EINVAL;
@@ -525,10 +525,6 @@ int tb_drom_read(struct tb_switch *sw)
sw->ports[3].dual_link_port = &sw->ports[4];
sw->ports[4].dual_link_port = &sw->ports[3];
- /* Port 5 is inaccessible on this gen 1 controller */
- if (sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE)
- sw->ports[5].disabled = true;
-
return 0;
}
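
device_property_count_u8() above replaces the open-coded zero-length array read; under the hood it is the same call with a NULL buffer. A sketch of the count-then-read idiom with a hypothetical property name:

	#include <linux/property.h>

	static int read_example_blob(struct device *dev, u8 *buf, size_t bufsize)
	{
		int len;

		len = device_property_count_u8(dev, "example-blob");
		if (len < 0)
			return len;		/* missing or wrong type */
		if ((size_t)len > bufsize)
			return -ENOSPC;

		return device_property_read_u8_array(dev, "example-blob", buf, len);
	}
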
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index fbdcef56a676..245588f691e7 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -55,16 +55,20 @@
* @safe_mode: ICM is in safe mode
* @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
* @rpm: Does the controller support runtime PM (RTD3)
+ * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller
+ * @veto: Is RTD3 veto in effect
* @is_supported: Checks if we can support ICM on this controller
* @cio_reset: Trigger CIO reset
* @get_mode: Read and return the ICM firmware mode (optional)
* @get_route: Find a route string for given switch
* @save_devices: Ask ICM to save devices to ACL when suspending (optional)
* @driver_ready: Send driver ready message to ICM
+ * @set_uuid: Set UUID for the root switch (optional)
* @device_connected: Handle device connected ICM message
* @device_disconnected: Handle device disconnected ICM message
* @xdomain_connected: Handle XDomain connected ICM message
* @xdomain_disconnected: Handle XDomain disconnected ICM message
+ * @rtd3_veto: Handle RTD3 veto notification ICM message
*/
struct icm {
struct mutex request_lock;
@@ -74,6 +78,8 @@ struct icm {
int vnd_cap;
bool safe_mode;
bool rpm;
+ bool can_upgrade_nvm;
+ bool veto;
bool (*is_supported)(struct tb *tb);
int (*cio_reset)(struct tb *tb);
int (*get_mode)(struct tb *tb);
@@ -82,6 +88,7 @@ struct icm {
int (*driver_ready)(struct tb *tb,
enum tb_security_level *security_level,
size_t *nboot_acl, bool *rpm);
+ void (*set_uuid)(struct tb *tb);
void (*device_connected)(struct tb *tb,
const struct icm_pkg_header *hdr);
void (*device_disconnected)(struct tb *tb,
@@ -90,6 +97,7 @@ struct icm {
const struct icm_pkg_header *hdr);
void (*xdomain_disconnected)(struct tb *tb,
const struct icm_pkg_header *hdr);
+ void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr);
};
struct icm_notification {
@@ -294,6 +302,43 @@ static int icm_request(struct tb *tb, const void *request, size_t request_size,
return -ETIMEDOUT;
}
+/*
+ * If rescan is queued to run (we are resuming), postpone it to give the
+ * firmware some more time to send device connected notifications for the
+ * next devices in the chain.
+ */
+static void icm_postpone_rescan(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+
+ if (delayed_work_pending(&icm->rescan_work))
+ mod_delayed_work(tb->wq, &icm->rescan_work,
+ msecs_to_jiffies(500));
+}
+
+static void icm_veto_begin(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+
+ if (!icm->veto) {
+ icm->veto = true;
+ /* Keep the domain powered while veto is in effect */
+ pm_runtime_get(&tb->dev);
+ }
+}
+
+static void icm_veto_end(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+
+ if (icm->veto) {
+ icm->veto = false;
+ /* Allow the domain suspend now */
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_put_autosuspend(&tb->dev);
+ }
+}
+
static bool icm_fr_is_supported(struct tb *tb)
{
return !x86_apple_machine;
@@ -517,14 +562,16 @@ static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
return 0;
}
-static void add_switch(struct tb_switch *parent_sw, u64 route,
- const uuid_t *uuid, const u8 *ep_name,
- size_t ep_name_size, u8 connection_id, u8 connection_key,
- u8 link, u8 depth, enum tb_security_level security_level,
- bool authorized, bool boot)
+static struct tb_switch *add_switch(struct tb_switch *parent_sw, u64 route,
+ const uuid_t *uuid, const u8 *ep_name,
+ size_t ep_name_size, u8 connection_id,
+ u8 connection_key, u8 link, u8 depth,
+ enum tb_security_level security_level,
+ bool authorized, bool boot)
{
const struct intel_vss *vss;
struct tb_switch *sw;
+ int ret;
pm_runtime_get_sync(&parent_sw->dev);
@@ -555,14 +602,18 @@ static void add_switch(struct tb_switch *parent_sw, u64 route,
tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
- if (tb_switch_add(sw)) {
+ ret = tb_switch_add(sw);
+ if (ret) {
tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
tb_switch_put(sw);
+ sw = ERR_PTR(ret);
}
out:
pm_runtime_mark_last_busy(&parent_sw->dev);
pm_runtime_put_autosuspend(&parent_sw->dev);
+
+ return sw;
}
static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
@@ -654,6 +705,8 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
u64 route;
int ret;
+ icm_postpone_rescan(tb);
+
link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
ICM_LINK_INFO_DEPTH_SHIFT;
@@ -1084,7 +1137,8 @@ static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
}
static void
-icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+__icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
+ bool force_rtd3)
{
const struct icm_tr_event_device_connected *pkg =
(const struct icm_tr_event_device_connected *)hdr;
@@ -1094,6 +1148,8 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
bool authorized, boot;
u64 route;
+ icm_postpone_rescan(tb);
+
/*
* Currently we don't use the QoS information coming with the
* device connected message, so simply ignore that extra
@@ -1149,14 +1205,22 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
return;
}
- add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
- sizeof(pkg->ep_name), pkg->connection_id,
- 0, 0, 0, security_level, authorized, boot);
+ sw = add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
+ sizeof(pkg->ep_name), pkg->connection_id, 0, 0, 0,
+ security_level, authorized, boot);
+ if (!IS_ERR(sw) && force_rtd3)
+ sw->rpm = true;
tb_switch_put(parent_sw);
}
static void
+icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ __icm_tr_device_connected(tb, hdr, false);
+}
+
+static void
icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
const struct icm_tr_event_device_disconnected *pkg =
@@ -1466,6 +1530,61 @@ static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
return 0;
}
+static int
+icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ size_t *nboot_acl, bool *rpm)
+{
+ struct icm_tr_pkg_driver_ready_response reply;
+ struct icm_pkg_driver_ready request = {
+ .hdr.code = ICM_DRIVER_READY,
+ };
+ int ret;
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, 20000);
+ if (ret)
+ return ret;
+
+ /* Ice Lake always supports RTD3 */
+ if (rpm)
+ *rpm = true;
+
+ return 0;
+}
+
+static void icm_icl_set_uuid(struct tb *tb)
+{
+ struct tb_nhi *nhi = tb->nhi;
+ u32 uuid[4];
+
+ pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]);
+ pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]);
+ uuid[2] = 0xffffffff;
+ uuid[3] = 0xffffffff;
+
+ tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
+}
+
+static void
+icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ __icm_tr_device_connected(tb, hdr, true);
+}
+
+static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_icl_event_rtd3_veto *pkg =
+ (const struct icm_icl_event_rtd3_veto *)hdr;
+
+ tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason);
+
+ if (pkg->veto_reason)
+ icm_veto_begin(tb);
+ else
+ icm_veto_end(tb);
+}
+
static void icm_handle_notification(struct work_struct *work)
{
struct icm_notification *n = container_of(work, typeof(*n), work);
@@ -1493,6 +1612,9 @@ static void icm_handle_notification(struct work_struct *work)
case ICM_EVENT_XDOMAIN_DISCONNECTED:
icm->xdomain_disconnected(tb, n->pkg);
break;
+ case ICM_EVENT_RTD3_VETO:
+ icm->rtd3_veto(tb, n->pkg);
+ break;
}
}
@@ -1851,6 +1973,13 @@ static void icm_complete(struct tb *tb)
if (tb->nhi->going_away)
return;
+ /*
+ * If RTD3 was vetoed before we entered system suspend, allow it
+ * again now, before the driver ready message is sent. The firmware
+ * sends a new RTD3 veto if the veto is still in effect after it has
+ * received the driver ready command.
+ */
+ icm_veto_end(tb);
icm_unplug_children(tb->root_switch);
/*
@@ -1913,14 +2042,12 @@ static int icm_start(struct tb *tb)
if (IS_ERR(tb->root_switch))
return PTR_ERR(tb->root_switch);
- /*
- * NVM upgrade has not been tested on Apple systems and they
- * don't provide images publicly either. To be on the safe side
- * prevent root switch NVM upgrade on Macs for now.
- */
- tb->root_switch->no_nvm_upgrade = x86_apple_machine;
+ tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm;
tb->root_switch->rpm = icm->rpm;
+ if (icm->set_uuid)
+ icm->set_uuid(tb);
+
ret = tb_switch_add(tb->root_switch);
if (ret) {
tb_switch_put(tb->root_switch);
@@ -2005,6 +2132,19 @@ static const struct tb_cm_ops icm_tr_ops = {
.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
};
+/* Ice Lake */
+static const struct tb_cm_ops icm_icl_ops = {
+ .driver_ready = icm_driver_ready,
+ .start = icm_start,
+ .stop = icm_stop,
+ .complete = icm_complete,
+ .runtime_suspend = icm_runtime_suspend,
+ .runtime_resume = icm_runtime_resume,
+ .handle_event = icm_handle_event,
+ .approve_xdomain_paths = icm_tr_approve_xdomain_paths,
+ .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
+};
+
struct tb *icm_probe(struct tb_nhi *nhi)
{
struct icm *icm;
@@ -2021,6 +2161,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
switch (nhi->pdev->device) {
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+ icm->can_upgrade_nvm = true;
icm->is_supported = icm_fr_is_supported;
icm->get_route = icm_fr_get_route;
icm->save_devices = icm_fr_save_devices;
@@ -2038,6 +2179,13 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
+ /*
+ * NVM upgrade has not been tested on Apple systems and
+ * they don't provide images publicly either. To be on
+ * the safe side prevent root switch NVM upgrade on Macs
+ * for now.
+ */
+ icm->can_upgrade_nvm = !x86_apple_machine;
icm->is_supported = icm_ar_is_supported;
icm->cio_reset = icm_ar_cio_reset;
icm->get_mode = icm_ar_get_mode;
@@ -2054,6 +2202,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
+ icm->can_upgrade_nvm = !x86_apple_machine;
icm->is_supported = icm_ar_is_supported;
icm->cio_reset = icm_tr_cio_reset;
icm->get_mode = icm_ar_get_mode;
@@ -2064,6 +2213,19 @@ struct tb *icm_probe(struct tb_nhi *nhi)
icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
tb->cm_ops = &icm_tr_ops;
break;
+
+ case PCI_DEVICE_ID_INTEL_ICL_NHI0:
+ case PCI_DEVICE_ID_INTEL_ICL_NHI1:
+ icm->is_supported = icm_fr_is_supported;
+ icm->driver_ready = icm_icl_driver_ready;
+ icm->set_uuid = icm_icl_set_uuid;
+ icm->device_connected = icm_icl_device_connected;
+ icm->device_disconnected = icm_tr_device_disconnected;
+ icm->xdomain_connected = icm_tr_xdomain_connected;
+ icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
+ icm->rtd3_veto = icm_icl_rtd3_veto;
+ tb->cm_ops = &icm_icl_ops;
+ break;
}
if (!icm->is_supported || !icm->is_supported(tb)) {
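
add_switch() above moves from a void return to the kernel's ERR_PTR() convention, so callers such as __icm_tr_device_connected() can distinguish failure and, on Ice Lake, set sw->rpm only on success. A generic sketch of the convention (hypothetical struct foo):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo { int val; };

	static struct foo *foo_create(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
		return f;
	}

	static int foo_demo(void)
	{
		struct foo *f = foo_create();

		if (IS_ERR(f))
			return PTR_ERR(f);	/* decode and propagate */
		kfree(f);
		return 0;
	}
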
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 27fbe62c7ddd..641b21b54460 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/property.h>
#include "nhi.h"
#include "nhi_regs.h"
@@ -143,9 +144,20 @@ static void __iomem *ring_options_base(struct tb_ring *ring)
return io;
}
-static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
+static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
- iowrite16(value, ring_desc_base(ring) + offset);
+ /*
+ * The other 16 bits in the register are read-only and writes to them
+ * are ignored by the hardware, so we can save one ioread32() by
+ * filling the read-only bits with zeroes.
+ */
+ iowrite32(cons, ring_desc_base(ring) + 8);
+}
+
+static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
+{
+ /* See ring_iowrite_cons() above for explanation */
+ iowrite32(prod << 16, ring_desc_base(ring) + 8);
}
static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
@@ -197,7 +209,10 @@ static void ring_write_descriptors(struct tb_ring *ring)
descriptor->sof = frame->sof;
}
ring->head = (ring->head + 1) % ring->size;
- ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
+ if (ring->is_tx)
+ ring_iowrite_prod(ring, ring->head);
+ else
+ ring_iowrite_cons(ring, ring->head);
}
}
@@ -662,7 +677,7 @@ void tb_ring_stop(struct tb_ring *ring)
ring_iowrite32options(ring, 0, 0);
ring_iowrite64desc(ring, 0, 0);
- ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
+ ring_iowrite32desc(ring, 0, 8);
ring_iowrite32desc(ring, 0, 12);
ring->head = 0;
ring->tail = 0;
@@ -845,12 +860,52 @@ static irqreturn_t nhi_msi(int irq, void *data)
return IRQ_HANDLED;
}
-static int nhi_suspend_noirq(struct device *dev)
+static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
+
+ ret = tb_domain_suspend_noirq(tb);
+ if (ret)
+ return ret;
+
+ if (nhi->ops && nhi->ops->suspend_noirq) {
+ ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nhi_suspend_noirq(struct device *dev)
+{
+ return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
+}
+
+static bool nhi_wake_supported(struct pci_dev *pdev)
+{
+ u8 val;
+
+ /*
+ * If the power rails are sustainable for wakeup from S4, this
+ * property is set by the BIOS.
+ */
+ if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
+ return !!val;
+
+ return true;
+}
+
+static int nhi_poweroff_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ bool wakeup;
- return tb_domain_suspend_noirq(tb);
+ wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
+ return __nhi_suspend_noirq(dev, wakeup);
}
static void nhi_enable_int_throttling(struct tb_nhi *nhi)
@@ -873,16 +928,24 @@ static int nhi_resume_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
/*
* Check that the device is still there. It may be that the user
* unplugged last device which causes the host controller to go
* away on PCs.
*/
- if (!pci_device_is_present(pdev))
- tb->nhi->going_away = true;
- else
+ if (!pci_device_is_present(pdev)) {
+ nhi->going_away = true;
+ } else {
+ if (nhi->ops && nhi->ops->resume_noirq) {
+ ret = nhi->ops->resume_noirq(nhi);
+ if (ret)
+ return ret;
+ }
nhi_enable_int_throttling(tb->nhi);
+ }
return tb_domain_resume_noirq(tb);
}
@@ -915,16 +978,35 @@ static int nhi_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
+
+ ret = tb_domain_runtime_suspend(tb);
+ if (ret)
+ return ret;
- return tb_domain_runtime_suspend(tb);
+ if (nhi->ops && nhi->ops->runtime_suspend) {
+ ret = nhi->ops->runtime_suspend(tb->nhi);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
static int nhi_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
- nhi_enable_int_throttling(tb->nhi);
+ if (nhi->ops && nhi->ops->runtime_resume) {
+ ret = nhi->ops->runtime_resume(nhi);
+ if (ret)
+ return ret;
+ }
+
+ nhi_enable_int_throttling(nhi);
return tb_domain_runtime_resume(tb);
}
@@ -952,6 +1034,9 @@ static void nhi_shutdown(struct tb_nhi *nhi)
flush_work(&nhi->interrupt_work);
}
ida_destroy(&nhi->msix_ida);
+
+ if (nhi->ops && nhi->ops->shutdown)
+ nhi->ops->shutdown(nhi);
}
static int nhi_init_msi(struct tb_nhi *nhi)
@@ -996,12 +1081,27 @@ static int nhi_init_msi(struct tb_nhi *nhi)
return 0;
}
+static bool nhi_imr_valid(struct pci_dev *pdev)
+{
+ u8 val;
+
+ if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
+ return !!val;
+
+ return true;
+}
+
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct tb_nhi *nhi;
struct tb *tb;
int res;
+ if (!nhi_imr_valid(pdev)) {
+ dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
+ return -ENODEV;
+ }
+
res = pcim_enable_device(pdev);
if (res) {
dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
@@ -1019,6 +1119,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
nhi->pdev = pdev;
+ nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
/* cannot fail - table is allocated in pcim_iomap_regions */
nhi->iobase = pcim_iomap_table(pdev)[0];
nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
@@ -1051,6 +1152,12 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
+ if (nhi->ops && nhi->ops->init) {
+ res = nhi->ops->init(nhi);
+ if (res)
+ return res;
+ }
+
tb = icm_probe(nhi);
if (!tb)
tb = tb_probe(nhi);
@@ -1111,6 +1218,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
.restore_noirq = nhi_resume_noirq,
.suspend = nhi_suspend,
.freeze = nhi_suspend,
+ .poweroff_noirq = nhi_poweroff_noirq,
.poweroff = nhi_suspend,
.complete = nhi_complete,
.runtime_suspend = nhi_runtime_suspend,
@@ -1158,6 +1266,10 @@ static struct pci_device_id nhi_ids[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ 0,}
};
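
nhi_wake_supported() and nhi_imr_valid() above share the same "optional BIOS property, default to true" shape: device_property_read_u8() returns 0 only when the property exists, so the fall-through branch is the default. A condensed sketch (hypothetical property name passed in by the caller):

	#include <linux/property.h>

	static bool bios_flag_or_default(struct device *dev, const char *prop)
	{
		u8 val;

		if (!device_property_read_u8(dev, prop, &val))
			return !!val;	/* BIOS gave an explicit answer */

		return true;		/* property absent: assume supported */
	}
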
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 1b5d47ecd3ed..b7b973949f8e 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -30,6 +30,26 @@ enum nhi_mailbox_cmd {
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data);
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
+/**
+ * struct tb_nhi_ops - NHI specific optional operations
+ * @init: NHI specific initialization
+ * @suspend_noirq: NHI specific suspend_noirq hook
+ * @resume_noirq: NHI specific resume_noirq hook
+ * @runtime_suspend: NHI specific runtime_suspend hook
+ * @runtime_resume: NHI specific runtime_resume hook
+ * @shutdown: NHI specific shutdown
+ */
+struct tb_nhi_ops {
+ int (*init)(struct tb_nhi *nhi);
+ int (*suspend_noirq)(struct tb_nhi *nhi, bool wakeup);
+ int (*resume_noirq)(struct tb_nhi *nhi);
+ int (*runtime_suspend)(struct tb_nhi *nhi);
+ int (*runtime_resume)(struct tb_nhi *nhi);
+ void (*shutdown)(struct tb_nhi *nhi);
+};
+
+extern const struct tb_nhi_ops icl_nhi_ops;
+
/*
* PCI IDs used in this driver from Win Ridge forward. There is no
* need for the PCI quirk anymore as we will use ICM also on Apple
@@ -51,5 +71,7 @@ enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE 0x15ea
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI 0x15eb
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
+#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d
+#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17
#endif
diff --git a/drivers/thunderbolt/nhi_ops.c b/drivers/thunderbolt/nhi_ops.c
new file mode 100644
index 000000000000..61cd09cef943
--- /dev/null
+++ b/drivers/thunderbolt/nhi_ops.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NHI specific operations
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/suspend.h>
+
+#include "nhi.h"
+#include "nhi_regs.h"
+#include "tb.h"
+
+/* Ice Lake specific NHI operations */
+
+#define ICL_LC_MAILBOX_TIMEOUT 500 /* ms */
+
+static int check_for_device(struct device *dev, void *data)
+{
+ return tb_is_switch(dev);
+}
+
+static bool icl_nhi_is_device_connected(struct tb_nhi *nhi)
+{
+ struct tb *tb = pci_get_drvdata(nhi->pdev);
+ int ret;
+
+ ret = device_for_each_child(&tb->root_switch->dev, NULL,
+ check_for_device);
+ return ret > 0;
+}
+
+static int icl_nhi_force_power(struct tb_nhi *nhi, bool power)
+{
+ u32 vs_cap;
+
+ /*
+ * The Thunderbolt host controller is always present in Ice Lake,
+ * but the firmware may not be loaded and running (depending on
+ * whether a device is connected and so on). Each time the
+ * controller is used we need to "Force Power" it first and wait
+ * for the firmware to indicate it is up and running. This "Force
+ * Power" is not about actually powering the controller on/off, so
+ * the controller remains accessible even when "Force Power" is off.
+ *
+ * The actual power management happens inside shared ACPI power
+ * resources using standard ACPI methods.
+ */
+ pci_read_config_dword(nhi->pdev, VS_CAP_22, &vs_cap);
+ if (power) {
+ vs_cap &= ~VS_CAP_22_DMA_DELAY_MASK;
+ vs_cap |= 0x22 << VS_CAP_22_DMA_DELAY_SHIFT;
+ vs_cap |= VS_CAP_22_FORCE_POWER;
+ } else {
+ vs_cap &= ~VS_CAP_22_FORCE_POWER;
+ }
+ pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap);
+
+ if (power) {
+ unsigned int retries = 10;
+ u32 val;
+
+ /* Wait until the firmware tells it is up and running */
+ do {
+ pci_read_config_dword(nhi->pdev, VS_CAP_9, &val);
+ if (val & VS_CAP_9_FW_READY)
+ return 0;
+ msleep(250);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void icl_nhi_lc_mailbox_cmd(struct tb_nhi *nhi, enum icl_lc_mailbox_cmd cmd)
+{
+ u32 data;
+
+ pci_read_config_dword(nhi->pdev, VS_CAP_19, &data);
+ data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK;
+ pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID);
+}
+
+static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout)
+{
+ unsigned long end;
+ u32 data;
+
+ if (!timeout)
+ goto clear;
+
+ end = jiffies + msecs_to_jiffies(timeout);
+ do {
+ pci_read_config_dword(nhi->pdev, VS_CAP_18, &data);
+ if (data & VS_CAP_18_DONE)
+ goto clear;
+ msleep(100);
+ } while (time_before(jiffies, end));
+
+ return -ETIMEDOUT;
+
+clear:
+ /* Clear the valid bit */
+ pci_write_config_dword(nhi->pdev, VS_CAP_19, 0);
+ return 0;
+}
+
+static void icl_nhi_set_ltr(struct tb_nhi *nhi)
+{
+ u32 max_ltr, ltr;
+
+ pci_read_config_dword(nhi->pdev, VS_CAP_16, &max_ltr);
+ max_ltr &= 0xffff;
+ /* Program the same value for both snoop and no-snoop */
+ ltr = max_ltr << 16 | max_ltr;
+ pci_write_config_dword(nhi->pdev, VS_CAP_15, ltr);
+}
+
+static int icl_nhi_suspend(struct tb_nhi *nhi)
+{
+ int ret;
+
+ if (icl_nhi_is_device_connected(nhi))
+ return 0;
+
+ /*
+ * If there is no device connected, we need to perform both a
+ * handshake through the LC mailbox and a force power-down before
+ * entering D3.
+ */
+ icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
+ ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return icl_nhi_force_power(nhi, false);
+}
+
+static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup)
+{
+ enum icl_lc_mailbox_cmd cmd;
+
+ if (!pm_suspend_via_firmware())
+ return icl_nhi_suspend(nhi);
+
+ cmd = wakeup ? ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE;
+ icl_nhi_lc_mailbox_cmd(nhi, cmd);
+ return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
+}
+
+static int icl_nhi_resume(struct tb_nhi *nhi)
+{
+ int ret;
+
+ ret = icl_nhi_force_power(nhi, true);
+ if (ret)
+ return ret;
+
+ icl_nhi_set_ltr(nhi);
+ return 0;
+}
+
+static void icl_nhi_shutdown(struct tb_nhi *nhi)
+{
+ icl_nhi_force_power(nhi, false);
+}
+
+const struct tb_nhi_ops icl_nhi_ops = {
+ .init = icl_nhi_resume,
+ .suspend_noirq = icl_nhi_suspend_noirq,
+ .resume_noirq = icl_nhi_resume,
+ .runtime_suspend = icl_nhi_suspend,
+ .runtime_resume = icl_nhi_resume,
+ .shutdown = icl_nhi_shutdown,
+};
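
icl_nhi_lc_mailbox_cmd_complete() and the force-power wait above are both instances of the usual jiffies-based polling idiom. A self-contained sketch with a hypothetical is_done() predicate:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/types.h>

	static int poll_until_done(bool (*is_done)(void *), void *ctx,
				   unsigned int timeout_ms)
	{
		unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

		do {
			if (is_done(ctx))
				return 0;
			msleep(100);
		} while (time_before(jiffies, end));

		/* one last check in case we slept past the deadline */
		return is_done(ctx) ? 0 : -ETIMEDOUT;
	}
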
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index a60bd98c1d04..0d4970dcef84 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -124,4 +124,41 @@ struct ring_desc {
#define REG_FW_STS_ICM_EN_INVERT BIT(1)
#define REG_FW_STS_ICM_EN BIT(0)
+/* ICL NHI VSEC registers */
+
+/* FW ready */
+#define VS_CAP_9 0xc8
+#define VS_CAP_9_FW_READY BIT(31)
+/* UUID */
+#define VS_CAP_10 0xcc
+#define VS_CAP_11 0xd0
+/* LTR */
+#define VS_CAP_15 0xe0
+#define VS_CAP_16 0xe4
+/* TBT2PCIe */
+#define VS_CAP_18 0xec
+#define VS_CAP_18_DONE BIT(0)
+/* PCIe2TBT */
+#define VS_CAP_19 0xf0
+#define VS_CAP_19_VALID BIT(0)
+#define VS_CAP_19_CMD_SHIFT 1
+#define VS_CAP_19_CMD_MASK GENMASK(7, 1)
+/* Force power */
+#define VS_CAP_22 0xfc
+#define VS_CAP_22_FORCE_POWER BIT(1)
+#define VS_CAP_22_DMA_DELAY_MASK GENMASK(31, 24)
+#define VS_CAP_22_DMA_DELAY_SHIFT 24
+
+/**
+ * enum icl_lc_mailbox_cmd - ICL specific LC mailbox commands
+ * @ICL_LC_GO2SX: Ask LC to enter Sx without wake
+ * @ICL_LC_GO2SX_NO_WAKE: Ask LC to enter Sx with wake
+ * @ICL_LC_PREPARE_FOR_RESET: Prepare LC for reset
+ */
+enum icl_lc_mailbox_cmd {
+ ICL_LC_GO2SX = 0x02,
+ ICL_LC_GO2SX_NO_WAKE = 0x03,
+ ICL_LC_PREPARE_FOR_RESET = 0x21,
+};
+
#endif
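
The PCIe2TBT command field above is a shifted mask; icl_nhi_lc_mailbox_cmd() encodes it by hand, and FIELD_PREP() from <linux/bitfield.h> is the equivalent mask-driven spelling. A sketch (assuming the nhi_regs.h definitions above are in scope) showing that both produce the same register value:

	#include <linux/bitfield.h>
	#include <linux/bug.h>

	static u32 encode_pcie2tbt(enum icl_lc_mailbox_cmd cmd)
	{
		u32 by_hand = ((cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK) |
			      VS_CAP_19_VALID;
		u32 by_field = FIELD_PREP(VS_CAP_19_CMD_MASK, cmd) | VS_CAP_19_VALID;

		/* the two encodings agree for any in-range command */
		WARN_ON(by_hand != by_field);
		return by_field;
	}
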
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 5668a44e0653..410bf1bceeee 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -364,12 +364,14 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
nvm->active = nvm_dev;
}
- nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
- if (IS_ERR(nvm_dev)) {
- ret = PTR_ERR(nvm_dev);
- goto err_nvm_active;
+ if (!sw->no_nvm_upgrade) {
+ nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
+ if (IS_ERR(nvm_dev)) {
+ ret = PTR_ERR(nvm_dev);
+ goto err_nvm_active;
+ }
+ nvm->non_active = nvm_dev;
}
- nvm->non_active = nvm_dev;
sw->nvm = nvm;
return 0;
@@ -398,7 +400,8 @@ static void tb_switch_nvm_remove(struct tb_switch *sw)
if (!nvm->authenticating)
nvm_clear_auth_status(sw);
- nvmem_unregister(nvm->non_active);
+ if (nvm->non_active)
+ nvmem_unregister(nvm->non_active);
if (nvm->active)
nvmem_unregister(nvm->active);
ida_simple_remove(&nvm_ida, nvm->id);
@@ -611,8 +614,14 @@ static int tb_init_port(struct tb_port *port)
int cap;
res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
- if (res)
+ if (res) {
+ if (res == -ENODEV) {
+ tb_dbg(port->sw->tb, " Port %d: not implemented\n",
+ port->port);
+ return 0;
+ }
return res;
+ }
/* Port 0 is the switch itself and has no PHY. */
if (port->config.type == TB_TYPE_PORT && port->port != 0) {
@@ -1331,14 +1340,29 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct tb_switch *sw = tb_to_switch(dev);
- if (attr == &dev_attr_key.attr) {
+ if (attr == &dev_attr_device.attr) {
+ if (!sw->device)
+ return 0;
+ } else if (attr == &dev_attr_device_name.attr) {
+ if (!sw->device_name)
+ return 0;
+ } else if (attr == &dev_attr_vendor.attr) {
+ if (!sw->vendor)
+ return 0;
+ } else if (attr == &dev_attr_vendor_name.attr) {
+ if (!sw->vendor_name)
+ return 0;
+ } else if (attr == &dev_attr_key.attr) {
if (tb_route(sw) &&
sw->tb->security_level == TB_SECURITY_SECURE &&
sw->security_level == TB_SECURITY_SECURE)
return attr->mode;
return 0;
- } else if (attr == &dev_attr_nvm_authenticate.attr ||
- attr == &dev_attr_nvm_version.attr) {
+ } else if (attr == &dev_attr_nvm_authenticate.attr) {
+ if (sw->dma_port && !sw->no_nvm_upgrade)
+ return attr->mode;
+ return 0;
+ } else if (attr == &dev_attr_nvm_version.attr) {
if (sw->dma_port)
return attr->mode;
return 0;
@@ -1446,6 +1470,8 @@ static int tb_switch_get_generation(struct tb_switch *sw)
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ICL_NHI0:
+ case PCI_DEVICE_ID_INTEL_ICL_NHI1:
return 3;
default:
@@ -1689,13 +1715,17 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
break;
}
- if (sw->no_nvm_upgrade)
+ /* Root switch DMA port requires running firmware */
+ if (!tb_route(sw) && sw->config.enabled)
return 0;
sw->dma_port = dma_port_alloc(sw);
if (!sw->dma_port)
return 0;
+ if (sw->no_nvm_upgrade)
+ return 0;
+
/*
* Check status of the previous flash authentication. If there
* is one we need to power cycle the switch in any case to make
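
The switch_attr_is_visible() expansion above relies on the sysfs convention that returning 0 hides an attribute while returning attr->mode keeps its declared permissions. A minimal sketch; device_has_example() is an assumed predicate, not a real API:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "42\n");
	}
	static DEVICE_ATTR_RO(example);

	/* device_has_example() is hypothetical, for the sketch only */
	static umode_t example_attr_is_visible(struct kobject *kobj,
					       struct attribute *attr, int n)
	{
		struct device *dev = kobj_to_dev(kobj);

		if (attr == &dev_attr_example.attr && !device_has_example(dev))
			return 0;		/* hide the attribute entirely */

		return attr->mode;		/* keep the declared permissions */
	}
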
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index afbe1d29bb03..4b641e4ee0c5 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -104,10 +104,11 @@ enum icm_pkg_code {
};
enum icm_event_code {
- ICM_EVENT_DEVICE_CONNECTED = 3,
- ICM_EVENT_DEVICE_DISCONNECTED = 4,
- ICM_EVENT_XDOMAIN_CONNECTED = 6,
- ICM_EVENT_XDOMAIN_DISCONNECTED = 7,
+ ICM_EVENT_DEVICE_CONNECTED = 0x3,
+ ICM_EVENT_DEVICE_DISCONNECTED = 0x4,
+ ICM_EVENT_XDOMAIN_CONNECTED = 0x6,
+ ICM_EVENT_XDOMAIN_DISCONNECTED = 0x7,
+ ICM_EVENT_RTD3_VETO = 0xa,
};
struct icm_pkg_header {
@@ -463,6 +464,13 @@ struct icm_tr_pkg_disconnect_xdomain_response {
uuid_t remote_uuid;
};
+/* Ice Lake messages */
+
+struct icm_icl_event_rtd3_veto {
+ struct icm_pkg_header hdr;
+ u32 veto_reason;
+};
+
/* XDomain messages */
struct tb_xdomain_header {
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 31d0234837e4..5a99234826e7 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -211,7 +211,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return NULL;
}
tb_pci_init_path(path);
- tunnel->paths[TB_PCI_PATH_UP] = path;
+ tunnel->paths[TB_PCI_PATH_DOWN] = path;
path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
"PCIe Up");
@@ -220,7 +220,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return NULL;
}
tb_pci_init_path(path);
- tunnel->paths[TB_PCI_PATH_DOWN] = path;
+ tunnel->paths[TB_PCI_PATH_UP] = path;
return tunnel;
}
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 5118d46702d5..4e17a7c7bf0a 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -636,7 +636,7 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
* It should be null terminated but anything else is pretty much
* allowed.
*/
- return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
+ return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index f32cef94aa82..ebcf1434e296 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -200,10 +200,8 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
if (!uioinfo->irq) {
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get IRQ\n");
+ if (ret < 0)
goto bad1;
- }
uioinfo->irq = ret;
}
uiomem = &uioinfo->mem[0];
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 10688d79d180..1303b165055b 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -102,12 +102,15 @@ static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
static int uio_pdrv_genirq_probe(struct platform_device *pdev)
{
struct uio_info *uioinfo = dev_get_platdata(&pdev->dev);
+ struct device_node *node = pdev->dev.of_node;
struct uio_pdrv_genirq_platdata *priv;
struct uio_mem *uiomem;
int ret = -EINVAL;
int i;
- if (pdev->dev.of_node) {
+ if (node) {
+ const char *name;
+
/* alloc uioinfo for one device */
uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo),
GFP_KERNEL);
@@ -115,8 +118,13 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to kmalloc\n");
return -ENOMEM;
}
- uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
- pdev->dev.of_node);
+
+ if (!of_property_read_string(node, "linux,uio-name", &name))
+ uioinfo->name = devm_kstrdup(&pdev->dev, name, GFP_KERNEL);
+ else
+ uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "%pOFn", node);
+
uioinfo->version = "devicetree";
/* Multiple IRQs are not supported */
}
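
The probe change above is the optional-DT-string-with-fallback idiom: of_property_read_string() returns 0 only when the property is present, so the devm_kasprintf() branch is the default. A condensed sketch of just that decision:

	#include <linux/device.h>
	#include <linux/of.h>
	#include <linux/slab.h>

	static const char *uio_pick_name(struct device *dev,
					 struct device_node *node)
	{
		const char *name;

		if (!of_property_read_string(node, "linux,uio-name", &name))
			return devm_kstrdup(dev, name, GFP_KERNEL);

		return devm_kasprintf(dev, GFP_KERNEL, "%pOFn", node);
	}
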
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 7ae260577901..24b9a8e05f64 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -65,5 +65,14 @@ config HDQ_MASTER_OMAP
Say Y here if you want support for the 1-wire or HDQ Interface
on an OMAP processor.
+config W1_MASTER_SGI
+ tristate "SGI ASIC driver"
+ help
+ Say Y here if you want support for your 1-wire devices using
+ the SGI ASIC 1-Wire interface.
+
+ This support is also available as a module. If so, the module
+ will be called sgi_w1.
+
endmenu
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index 18954cae4256..dae629b7ab49 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_W1_MASTER_MXC) += mxc_w1.o
obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o
obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o
obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o
+obj-$(CONFIG_W1_MASTER_SGI) += sgi_w1.o
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index c3b2095ef6a9..1ca880e01476 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -92,7 +92,6 @@ static int mxc_w1_probe(struct platform_device *pdev)
{
struct mxc_w1_device *mdev;
unsigned long clkrate;
- struct resource *res;
unsigned int clkdiv;
int err;
@@ -120,8 +119,7 @@ static int mxc_w1_probe(struct platform_device *pdev)
dev_warn(&pdev->dev,
"Incorrect time base frequency %lu Hz\n", clkrate);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdev->regs = devm_ioremap_resource(&pdev->dev, res);
+ mdev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->regs)) {
err = PTR_ERR(mdev->regs);
goto out_disable_clk;
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 3099052e1243..4164045866b3 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -660,7 +660,6 @@ static int omap_hdq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hdq_data *hdq_data;
- struct resource *res;
int ret, irq;
u8 rev;
const char *mode;
@@ -674,8 +673,7 @@ static int omap_hdq_probe(struct platform_device *pdev)
hdq_data->dev = dev;
platform_set_drvdata(pdev, hdq_data);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdq_data->hdq_base = devm_ioremap_resource(dev, res);
+ hdq_data->hdq_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdq_data->hdq_base))
return PTR_ERR(hdq_data->hdq_base);
diff --git a/drivers/w1/masters/sgi_w1.c b/drivers/w1/masters/sgi_w1.c
new file mode 100644
index 000000000000..1b2d96b945be
--- /dev/null
+++ b/drivers/w1/masters/sgi_w1.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sgi_w1.c - w1 master driver for one wire support in SGI ASICs
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/sgi-w1.h>
+
+#include <linux/w1.h>
+
+#define MCR_RD_DATA BIT(0)
+#define MCR_DONE BIT(1)
+
+#define MCR_PACK(pulse, sample) (((pulse) << 10) | ((sample) << 2))
+
+struct sgi_w1_device {
+ u32 __iomem *mcr;
+ struct w1_bus_master bus_master;
+ char dev_id[64];
+};
+
+static u8 sgi_w1_wait(u32 __iomem *mcr)
+{
+ u32 mcr_val;
+
+ do {
+ mcr_val = readl(mcr);
+ } while (!(mcr_val & MCR_DONE));
+
+ return (mcr_val & MCR_RD_DATA) ? 1 : 0;
+}
+
+/*
+ * This is the low-level routine that resets the devices on the
+ * One Wire interface in hardware.
+ */
+static u8 sgi_w1_reset_bus(void *data)
+{
+ struct sgi_w1_device *dev = data;
+ u8 ret;
+
+ writel(MCR_PACK(520, 65), dev->mcr);
+ ret = sgi_w1_wait(dev->mcr);
+ udelay(500); /* recovery time */
+ return ret;
+}
+
+/*
+ * This is the low-level routine that reads/writes a bit on the One
+ * Wire interface in hardware. It performs a write-0 slot if the bit
+ * parameter is 0, otherwise a write-1/read slot.
+ */
+static u8 sgi_w1_touch_bit(void *data, u8 bit)
+{
+ struct sgi_w1_device *dev = data;
+ u8 ret;
+
+ if (bit)
+ writel(MCR_PACK(6, 13), dev->mcr);
+ else
+ writel(MCR_PACK(80, 30), dev->mcr);
+
+ ret = sgi_w1_wait(dev->mcr);
+ if (bit)
+ udelay(100); /* recovery */
+ return ret;
+}
+
+static int sgi_w1_probe(struct platform_device *pdev)
+{
+ struct sgi_w1_device *sdev;
+ struct sgi_w1_platform_data *pdata;
+ struct resource *res;
+
+ sdev = devm_kzalloc(&pdev->dev, sizeof(struct sgi_w1_device),
+ GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sdev->mcr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sdev->mcr))
+ return PTR_ERR(sdev->mcr);
+
+ sdev->bus_master.data = sdev;
+ sdev->bus_master.reset_bus = sgi_w1_reset_bus;
+ sdev->bus_master.touch_bit = sgi_w1_touch_bit;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ strlcpy(sdev->dev_id, pdata->dev_id, sizeof(sdev->dev_id));
+ sdev->bus_master.dev_id = sdev->dev_id;
+ }
+
+ platform_set_drvdata(pdev, sdev);
+
+ return w1_add_master_device(&sdev->bus_master);
+}
+
+/*
+ * disassociate the w1 device from the driver
+ */
+static int sgi_w1_remove(struct platform_device *pdev)
+{
+ struct sgi_w1_device *sdev = platform_get_drvdata(pdev);
+
+ w1_remove_master_device(&sdev->bus_master);
+
+ return 0;
+}
+
+static struct platform_driver sgi_w1_driver = {
+ .driver = {
+ .name = "sgi_w1",
+ },
+ .probe = sgi_w1_probe,
+ .remove = sgi_w1_remove,
+};
+module_platform_driver(sgi_w1_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Bogendoerfer");
+MODULE_DESCRIPTION("Driver for One-Wire IP in SGI ASICs");
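
MCR_PACK() above packs a drive-low pulse length and a sample point into a single MCR write; the time base is ASIC-specific (a microsecond-scale unit is an assumption here). Gathering the three waveforms the driver issues in one sketch makes the encoding easier to compare:

	/* Sketch only: the same three MCR writes sgi_w1.c issues, side by
	 * side. (pulse, sample) are counts in the ASIC's 1-Wire time base. */
	static void sgi_w1_waveforms(struct sgi_w1_device *dev)
	{
		writel(MCR_PACK(520, 65), dev->mcr); /* bus reset + presence sample */
		writel(MCR_PACK(80, 30), dev->mcr);  /* write-0 time slot */
		writel(MCR_PACK(6, 13), dev->mcr);   /* write-1 / read time slot */
	}
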
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 37aaad26b373..ebed495b9e69 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -101,6 +101,12 @@ config W1_SLAVE_DS2438
Say Y here if you want to use a 1-wire
DS2438 Smart Battery Monitor device support
+config W1_SLAVE_DS250X
+ tristate "512b/1kb/16kb EPROM family support"
+ help
+ Say Y here if you want to use a 1-wire
+ 512b/1kb/16kb EPROM family device (DS250x).
+
config W1_SLAVE_DS2780
tristate "Dallas 2780 battery monitor chip"
help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index eab29f151413..8e9655eaa478 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
obj-$(CONFIG_W1_SLAVE_DS2805) += w1_ds2805.o
obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
obj-$(CONFIG_W1_SLAVE_DS2438) += w1_ds2438.o
+obj-$(CONFIG_W1_SLAVE_DS250X) += w1_ds250x.o
obj-$(CONFIG_W1_SLAVE_DS2780) += w1_ds2780.o
obj-$(CONFIG_W1_SLAVE_DS2781) += w1_ds2781.o
obj-$(CONFIG_W1_SLAVE_DS28E04) += w1_ds28e04.o
diff --git a/drivers/w1/slaves/w1_ds250x.c b/drivers/w1/slaves/w1_ds250x.c
new file mode 100644
index 000000000000..e507117444d8
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds250x.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * w1_ds250x.c - w1 family 09/0b/89/91 (DS250x) driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/crc16.h>
+
+#include <linux/w1.h>
+#include <linux/nvmem-provider.h>
+
+#define W1_DS2501_UNW_FAMILY 0x91
+#define W1_DS2501_SIZE 64
+
+#define W1_DS2502_FAMILY 0x09
+#define W1_DS2502_UNW_FAMILY 0x89
+#define W1_DS2502_SIZE 128
+
+#define W1_DS2505_FAMILY 0x0b
+#define W1_DS2505_SIZE 2048
+
+#define W1_PAGE_SIZE 32
+
+#define W1_EXT_READ_MEMORY 0xA5
+#define W1_READ_DATA_CRC 0xC3
+
+#define OFF2PG(off) ((off) / W1_PAGE_SIZE)
+
+#define CRC16_INIT 0
+#define CRC16_VALID 0xb001
+
+struct w1_eprom_data {
+ size_t size;
+ int (*read)(struct w1_slave *sl, int pageno);
+ u8 eprom[W1_DS2505_SIZE];
+ DECLARE_BITMAP(page_present, W1_DS2505_SIZE / W1_PAGE_SIZE);
+ char nvmem_name[64];
+};
+
+static int w1_ds2502_read_page(struct w1_slave *sl, int pageno)
+{
+ struct w1_eprom_data *data = sl->family_data;
+ int pgoff = pageno * W1_PAGE_SIZE;
+ int ret = -EIO;
+ u8 buf[3];
+ u8 crc8;
+
+ if (test_bit(pageno, data->page_present))
+ return 0; /* page already present */
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ if (w1_reset_select_slave(sl))
+ goto err;
+
+ buf[0] = W1_READ_DATA_CRC;
+ buf[1] = pgoff & 0xff;
+ buf[2] = pgoff >> 8;
+ w1_write_block(sl->master, buf, 3);
+
+ crc8 = w1_read_8(sl->master);
+ if (w1_calc_crc8(buf, 3) != crc8)
+ goto err;
+
+ w1_read_block(sl->master, &data->eprom[pgoff], W1_PAGE_SIZE);
+
+ crc8 = w1_read_8(sl->master);
+ if (w1_calc_crc8(&data->eprom[pgoff], W1_PAGE_SIZE) != crc8)
+ goto err;
+
+ set_bit(pageno, data->page_present); /* mark page present */
+ ret = 0;
+err:
+ mutex_unlock(&sl->master->bus_mutex);
+ return ret;
+}
+
+static int w1_ds2505_read_page(struct w1_slave *sl, int pageno)
+{
+ struct w1_eprom_data *data = sl->family_data;
+ int redir_retries = 16;
+ int pgoff, epoff;
+ int ret = -EIO;
+ u8 buf[6];
+ u8 redir;
+ u16 crc;
+
+ if (test_bit(pageno, data->page_present))
+ return 0; /* page already present */
+
+ epoff = pgoff = pageno * W1_PAGE_SIZE;
+ mutex_lock(&sl->master->bus_mutex);
+
+retry:
+ if (w1_reset_select_slave(sl))
+ goto err;
+
+ buf[0] = W1_EXT_READ_MEMORY;
+ buf[1] = pgoff & 0xff;
+ buf[2] = pgoff >> 8;
+ w1_write_block(sl->master, buf, 3);
+ w1_read_block(sl->master, buf + 3, 3); /* redir, crc16 */
+ redir = buf[3];
+ crc = crc16(CRC16_INIT, buf, 6);
+
+ if (crc != CRC16_VALID)
+ goto err;
+
+ if (redir != 0xff) {
+ redir_retries--;
+ if (redir_retries < 0)
+ goto err;
+
+ pgoff = (redir ^ 0xff) * W1_PAGE_SIZE;
+ goto retry;
+ }
+
+ w1_read_block(sl->master, &data->eprom[epoff], W1_PAGE_SIZE);
+ w1_read_block(sl->master, buf, 2); /* crc16 */
+ crc = crc16(CRC16_INIT, &data->eprom[epoff], W1_PAGE_SIZE);
+ crc = crc16(crc, buf, 2);
+
+ if (crc != CRC16_VALID)
+ goto err;
+
+ set_bit(pageno, data->page_present);
+ ret = 0;
+err:
+ mutex_unlock(&sl->master->bus_mutex);
+ return ret;
+}
+
+static int w1_nvmem_read(void *priv, unsigned int off, void *buf, size_t count)
+{
+ struct w1_slave *sl = priv;
+ struct w1_eprom_data *data = sl->family_data;
+ size_t eprom_size = data->size;
+ int ret;
+ int i;
+
+ if (off > eprom_size)
+ return -EINVAL;
+
+ if ((off + count) > eprom_size)
+ count = eprom_size - off;
+
+ if (!count)
+ return 0;
+
+ i = OFF2PG(off);
+ do {
+ ret = data->read(sl, i++);
+ if (ret < 0)
+ return ret;
+ } while (i <= OFF2PG(off + count - 1));
+
+ memcpy(buf, &data->eprom[off], count);
+ return 0;
+}
+
+static int w1_eprom_add_slave(struct w1_slave *sl)
+{
+ struct w1_eprom_data *data;
+ struct nvmem_device *nvmem;
+ struct nvmem_config nvmem_cfg = {
+ .dev = &sl->dev,
+ .reg_read = w1_nvmem_read,
+ .type = NVMEM_TYPE_OTP,
+ .read_only = true,
+ .word_size = 1,
+ .priv = sl,
+ .id = -1
+ };
+
+ data = devm_kzalloc(&sl->dev, sizeof(struct w1_eprom_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ sl->family_data = data;
+ switch (sl->family->fid) {
+ case W1_DS2501_UNW_FAMILY:
+ data->size = W1_DS2501_SIZE;
+ data->read = w1_ds2502_read_page;
+ break;
+ case W1_DS2502_FAMILY:
+ case W1_DS2502_UNW_FAMILY:
+ data->size = W1_DS2502_SIZE;
+ data->read = w1_ds2502_read_page;
+ break;
+ case W1_DS2505_FAMILY:
+ data->size = W1_DS2505_SIZE;
+ data->read = w1_ds2505_read_page;
+ break;
+ }
+
+ if (sl->master->bus_master->dev_id)
+ snprintf(data->nvmem_name, sizeof(data->nvmem_name),
+ "%s-%02x-%012llx",
+ sl->master->bus_master->dev_id, sl->reg_num.family,
+ (unsigned long long)sl->reg_num.id);
+ else
+ snprintf(data->nvmem_name, sizeof(data->nvmem_name),
+ "%02x-%012llx",
+ sl->reg_num.family,
+ (unsigned long long)sl->reg_num.id);
+
+ nvmem_cfg.name = data->nvmem_name;
+ nvmem_cfg.size = data->size;
+
+ nvmem = devm_nvmem_register(&sl->dev, &nvmem_cfg);
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct w1_family_ops w1_eprom_fops = {
+ .add_slave = w1_eprom_add_slave,
+};
+
+static struct w1_family w1_family_09 = {
+ .fid = W1_DS2502_FAMILY,
+ .fops = &w1_eprom_fops,
+};
+
+static struct w1_family w1_family_0b = {
+ .fid = W1_DS2505_FAMILY,
+ .fops = &w1_eprom_fops,
+};
+
+static struct w1_family w1_family_89 = {
+ .fid = W1_DS2502_UNW_FAMILY,
+ .fops = &w1_eprom_fops,
+};
+
+static struct w1_family w1_family_91 = {
+ .fid = W1_DS2501_UNW_FAMILY,
+ .fops = &w1_eprom_fops,
+};
+
+static int __init w1_ds250x_init(void)
+{
+ int err;
+
+ err = w1_register_family(&w1_family_09);
+ if (err)
+ return err;
+
+ err = w1_register_family(&w1_family_0b);
+ if (err)
+ goto err_0b;
+
+ err = w1_register_family(&w1_family_89);
+ if (err)
+ goto err_89;
+
+ err = w1_register_family(&w1_family_91);
+ if (err)
+ goto err_91;
+
+ return 0;
+
+err_91:
+ w1_unregister_family(&w1_family_89);
+err_89:
+ w1_unregister_family(&w1_family_0b);
+err_0b:
+ w1_unregister_family(&w1_family_09);
+ return err;
+}
+
+static void __exit w1_ds250x_exit(void)
+{
+ w1_unregister_family(&w1_family_09);
+ w1_unregister_family(&w1_family_0b);
+ w1_unregister_family(&w1_family_89);
+ w1_unregister_family(&w1_family_91);
+}
+
+module_init(w1_ds250x_init);
+module_exit(w1_ds250x_exit);
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfe@suse.de>");
+MODULE_DESCRIPTION("w1 family driver for DS250x Add Only Memory");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_DS2502_FAMILY));
+MODULE_ALIAS("w1-family-" __stringify(W1_DS2505_FAMILY));
+MODULE_ALIAS("w1-family-" __stringify(W1_DS2501_UNW_FAMILY));
+MODULE_ALIAS("w1-family-" __stringify(W1_DS2502_UNW_FAMILY));
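
Both read paths above validate frames with the 1-Wire CRC16 residue trick: the device transmits the bitwise complement of the CRC, so running crc16() across the payload plus the two transmitted CRC bytes yields the constant 0xb001 (CRC16_VALID) whenever the frame is intact. A sketch of the check in isolation:

	#include <linux/crc16.h>
	#include <linux/types.h>

	/* frame = payload followed by the two inverted CRC bytes off the wire */
	static bool w1_frame_intact(const u8 *frame, size_t len_with_crc)
	{
		return crc16(0, frame, len_with_crc) == 0xb001;
	}
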