Diffstat (limited to 'drivers/accel/ivpu')
-rw-r--r--  drivers/accel/ivpu/Makefile                                                |    7
-rw-r--r--  drivers/accel/ivpu/ivpu_debugfs.c                                          |  294
-rw-r--r--  drivers/accel/ivpu/ivpu_debugfs.h                                          |   13
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.c                                              |   49
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.h                                              |   24
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.c                                               |  107
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.h                                               |    4
-rw-r--r--  drivers/accel/ivpu/ivpu_fw_log.c                                           |  142
-rw-r--r--  drivers/accel/ivpu/ivpu_fw_log.h                                           |   38
-rw-r--r--  drivers/accel/ivpu/ivpu_gem.c                                              |   10
-rw-r--r--  drivers/accel/ivpu/ivpu_hw.h                                               |   13
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_37xx.c (renamed from drivers/accel/ivpu/ivpu_hw_mtl.c) |  496
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_37xx_reg.h                                      |  281
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_40xx.c                                          | 1178
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_40xx_reg.h                                      |  267
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_mtl_reg.h                                       |  281
-rw-r--r--  drivers/accel/ivpu/ivpu_job.c                                              |    4
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu.c                                              |   98
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu_context.c                                      |  302
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu_context.h                                      |    9
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.c                                               |    1
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.h                                               |    1
22 files changed, 2894 insertions(+), 725 deletions(-)
diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile
index 80f1fb3548ae..e4328b430564 100644
--- a/drivers/accel/ivpu/Makefile
+++ b/drivers/accel/ivpu/Makefile
@@ -2,10 +2,13 @@
# Copyright (C) 2023 Intel Corporation
intel_vpu-y := \
+ ivpu_debugfs.o \
ivpu_drv.o \
ivpu_fw.o \
+ ivpu_fw_log.o \
ivpu_gem.o \
- ivpu_hw_mtl.o \
+ ivpu_hw_37xx.o \
+ ivpu_hw_40xx.o \
ivpu_ipc.o \
ivpu_job.o \
ivpu_jsm_msg.o \
@@ -13,4 +16,4 @@ intel_vpu-y := \
ivpu_mmu_context.o \
ivpu_pm.o
-obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o
\ No newline at end of file
+obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
new file mode 100644
index 000000000000..5e5996fd4f9f
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
+#include <uapi/drm/ivpu_accel.h>
+
+#include "ivpu_debugfs.h"
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
+#include "ivpu_gem.h"
+#include "ivpu_jsm_msg.h"
+#include "ivpu_pm.h"
+
+static int bo_list_show(struct seq_file *s, void *v)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct drm_printer p = drm_seq_file_printer(s);
+
+ ivpu_bo_list(node->minor->dev, &p);
+
+ return 0;
+}
+
+static int fw_name_show(struct seq_file *s, void *v)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
+
+ seq_printf(s, "%s\n", vdev->fw->name);
+ return 0;
+}
+
+static int fw_trace_capability_show(struct seq_file *s, void *v)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
+ u64 trace_hw_component_mask;
+ u32 trace_destination_mask;
+ int ret;
+
+ ret = ivpu_jsm_trace_get_capability(vdev, &trace_destination_mask,
+ &trace_hw_component_mask);
+ if (!ret) {
+ seq_printf(s,
+ "trace_destination_mask: %#18x\n"
+ "trace_hw_component_mask: %#18llx\n",
+ trace_destination_mask, trace_hw_component_mask);
+ }
+ return 0;
+}
+
+static int fw_trace_config_show(struct seq_file *s, void *v)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
+ /*
+ * WA: VPU_JSM_MSG_TRACE_GET_CONFIG command is not working yet,
+ * so we use values from vdev->fw instead of calling ivpu_jsm_trace_get_config()
+ */
+ u32 trace_level = vdev->fw->trace_level;
+ u32 trace_destination_mask = vdev->fw->trace_destination_mask;
+ u64 trace_hw_component_mask = vdev->fw->trace_hw_component_mask;
+
+ seq_printf(s,
+ "trace_level: %#18x\n"
+ "trace_destination_mask: %#18x\n"
+ "trace_hw_component_mask: %#18llx\n",
+ trace_level, trace_destination_mask, trace_hw_component_mask);
+
+ return 0;
+}
+
+static int last_bootmode_show(struct seq_file *s, void *v)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
+
+ seq_printf(s, "%s\n", (vdev->pm->is_warmboot) ? "warmboot" : "coldboot");
+
+ return 0;
+}
+
+static int reset_counter_show(struct seq_file *s, void *v)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
+
+ seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_counter));
+ return 0;
+}
+
+static int reset_pending_show(struct seq_file *s, void *v)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
+
+ seq_printf(s, "%d\n", atomic_read(&vdev->pm->in_reset));
+ return 0;
+}
+
+static const struct drm_info_list vdev_debugfs_list[] = {
+ {"bo_list", bo_list_show, 0},
+ {"fw_name", fw_name_show, 0},
+ {"fw_trace_capability", fw_trace_capability_show, 0},
+ {"fw_trace_config", fw_trace_config_show, 0},
+ {"last_bootmode", last_bootmode_show, 0},
+ {"reset_counter", reset_counter_show, 0},
+ {"reset_pending", reset_pending_show, 0},
+};
+
+static int fw_log_show(struct seq_file *s, void *v)
+{
+ struct ivpu_device *vdev = s->private;
+ struct drm_printer p = drm_seq_file_printer(s);
+
+ ivpu_fw_log_print(vdev, true, &p);
+ return 0;
+}
+
+static int fw_log_fops_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fw_log_show, inode->i_private);
+}
+
+static ssize_t
+fw_log_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+ struct seq_file *s = file->private_data;
+ struct ivpu_device *vdev = s->private;
+
+ if (!size)
+ return -EINVAL;
+
+ ivpu_fw_log_clear(vdev);
+ return size;
+}
+
+static const struct file_operations fw_log_fops = {
+ .owner = THIS_MODULE,
+ .open = fw_log_fops_open,
+ .write = fw_log_fops_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t
+fw_trace_destination_mask_fops_write(struct file *file, const char __user *user_buf,
+ size_t size, loff_t *pos)
+{
+ struct ivpu_device *vdev = file->private_data;
+ struct ivpu_fw_info *fw = vdev->fw;
+ u32 trace_destination_mask;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, size, 0, &trace_destination_mask);
+ if (ret < 0)
+ return ret;
+
+ fw->trace_destination_mask = trace_destination_mask;
+
+ ivpu_jsm_trace_set_config(vdev, fw->trace_level, trace_destination_mask,
+ fw->trace_hw_component_mask);
+
+ return size;
+}
+
+static const struct file_operations fw_trace_destination_mask_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = fw_trace_destination_mask_fops_write,
+};
+
+static ssize_t
+fw_trace_hw_comp_mask_fops_write(struct file *file, const char __user *user_buf,
+ size_t size, loff_t *pos)
+{
+ struct ivpu_device *vdev = file->private_data;
+ struct ivpu_fw_info *fw = vdev->fw;
+ u64 trace_hw_component_mask;
+ int ret;
+
+ ret = kstrtou64_from_user(user_buf, size, 0, &trace_hw_component_mask);
+ if (ret < 0)
+ return ret;
+
+ fw->trace_hw_component_mask = trace_hw_component_mask;
+
+ ivpu_jsm_trace_set_config(vdev, fw->trace_level, fw->trace_destination_mask,
+ trace_hw_component_mask);
+
+ return size;
+}
+
+static const struct file_operations fw_trace_hw_comp_mask_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = fw_trace_hw_comp_mask_fops_write,
+};
+
+static ssize_t
+fw_trace_level_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+ struct ivpu_device *vdev = file->private_data;
+ struct ivpu_fw_info *fw = vdev->fw;
+ u32 trace_level;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, size, 0, &trace_level);
+ if (ret < 0)
+ return ret;
+
+ fw->trace_level = trace_level;
+
+ ivpu_jsm_trace_set_config(vdev, trace_level, fw->trace_destination_mask,
+ fw->trace_hw_component_mask);
+
+ return size;
+}
+
+static const struct file_operations fw_trace_level_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = fw_trace_level_fops_write,
+};
+
+static ssize_t
+ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+ struct ivpu_device *vdev = file->private_data;
+
+ if (!size)
+ return -EINVAL;
+
+ if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
+ return -ENODEV;
+ if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY))
+ return -ENODEV;
+
+ return size;
+}
+
+static ssize_t
+ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+ struct ivpu_device *vdev = file->private_data;
+
+ if (!size)
+ return -EINVAL;
+
+ ivpu_pm_schedule_recovery(vdev);
+ return size;
+}
+
+static const struct file_operations ivpu_force_recovery_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = ivpu_force_recovery_fn,
+};
+
+static const struct file_operations ivpu_reset_engine_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = ivpu_reset_engine_fn,
+};
+
+void ivpu_debugfs_init(struct drm_minor *minor)
+{
+ struct ivpu_device *vdev = to_ivpu_device(minor->dev);
+
+ drm_debugfs_create_files(vdev_debugfs_list, ARRAY_SIZE(vdev_debugfs_list),
+ minor->debugfs_root, minor);
+
+ debugfs_create_file("force_recovery", 0200, minor->debugfs_root, vdev,
+ &ivpu_force_recovery_fops);
+
+ debugfs_create_file("fw_log", 0644, minor->debugfs_root, vdev,
+ &fw_log_fops);
+ debugfs_create_file("fw_trace_destination_mask", 0200, minor->debugfs_root, vdev,
+ &fw_trace_destination_mask_fops);
+ debugfs_create_file("fw_trace_hw_comp_mask", 0200, minor->debugfs_root, vdev,
+ &fw_trace_hw_comp_mask_fops);
+ debugfs_create_file("fw_trace_level", 0200, minor->debugfs_root, vdev,
+ &fw_trace_level_fops);
+
+ debugfs_create_file("reset_engine", 0200, minor->debugfs_root, vdev,
+ &ivpu_reset_engine_fops);
+}
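
For illustration, here is a minimal userspace sketch of driving the new debugfs entries once the patch is applied. It is not part of the patch: the "/sys/kernel/debug/accel/0000" directory name is an assumption (the actual minor directory depends on device enumeration). Note that any write to fw_log clears the firmware log via ivpu_fw_log_clear(), and that fw_trace_level accepts decimal, octal, or hex input because the handler uses kstrtou32_from_user() with base 0.

/* Hypothetical example, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *dir = "/sys/kernel/debug/accel/0000"; /* assumed minor dir */
        char path[256], buf[4096];
        ssize_t n;
        int fd;

        /* Raise the FW trace level to 2, i.e. IVPU_FW_LOG_INFO. */
        snprintf(path, sizeof(path), "%s/fw_trace_level", dir);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return 1;
        write(fd, "2", 1);
        close(fd);

        /* Dump whatever the firmware has logged so far. */
        snprintf(path, sizeof(path), "%s/fw_log", dir);
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return 1;
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);
        close(fd);
        return 0;
}
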
diff --git a/drivers/accel/ivpu/ivpu_debugfs.h b/drivers/accel/ivpu/ivpu_debugfs.h
new file mode 100644
index 000000000000..78f80c1e00e4
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_debugfs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_DEBUGFS_H__
+#define __IVPU_DEBUGFS_H__
+
+struct drm_minor;
+
+void ivpu_debugfs_init(struct drm_minor *minor);
+
+#endif /* __IVPU_DEBUGFS_H__ */
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 8396db2b5203..ba79f397c9e8 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -14,6 +14,7 @@
#include <drm/drm_prime.h>
#include "vpu_boot_api.h"
+#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_gem.h"
@@ -50,6 +51,10 @@ u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");
+bool ivpu_disable_mmu_cont_pages;
+module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
+MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");
+
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
@@ -110,6 +115,22 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
kref_put(&file_priv->ref, file_priv_release);
}
+static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
+{
+ switch (args->index) {
+ case DRM_IVPU_CAP_METRIC_STREAMER:
+ args->value = 0;
+ break;
+ case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
+ args->value = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
@@ -139,7 +160,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = ivpu_get_context_count(vdev);
break;
case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
- args->value = vdev->hw->ranges.user_low.start;
+ args->value = vdev->hw->ranges.user.start;
break;
case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
args->value = file_priv->priority;
@@ -169,6 +190,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
case DRM_IVPU_PARAM_SKU:
args->value = vdev->hw->sku;
break;
+ case DRM_IVPU_PARAM_CAPABILITIES:
+ ret = ivpu_get_capabilities(vdev, args);
+ break;
default:
ret = -EINVAL;
break;
@@ -369,10 +393,11 @@ static const struct drm_driver driver = {
.open = ivpu_open,
.postclose = ivpu_postclose,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = ivpu_gem_prime_import,
- .gem_prime_mmap = drm_gem_prime_mmap,
+
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = ivpu_debugfs_init,
+#endif
.ioctls = ivpu_drm_ioctls,
.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
@@ -427,7 +452,7 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
return PTR_ERR(vdev->regb);
}
- ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(38));
+ ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
if (ret) {
ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
return ret;
@@ -437,8 +462,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
/* Clear any pending errors */
pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
- /* VPU MTL does not require PCI spec 10m D3hot delay */
- if (ivpu_is_mtl(vdev))
+ /* VPU 37XX does not require the 10 ms D3hot delay */
+ if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
pdev->d3hot_delay = 0;
ret = pcim_enable_device(pdev);
@@ -476,7 +501,14 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (!vdev->pm)
return -ENOMEM;
- vdev->hw->ops = &ivpu_hw_mtl_ops;
+ if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
+ vdev->hw->ops = &ivpu_hw_40xx_ops;
+ vdev->hw->dma_bits = 48;
+ } else {
+ vdev->hw->ops = &ivpu_hw_37xx_ops;
+ vdev->hw->dma_bits = 38;
+ }
+
vdev->platform = IVPU_PLATFORM_INVALID;
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
@@ -602,6 +634,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
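
A hypothetical userspace sketch of querying the new DRM_IVPU_PARAM_CAPABILITIES parameter through the existing GET_PARAM ioctl. The /dev/accel/accel0 node name is an assumption, and the ivpu_accel.h UAPI header must be installed:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/drm.h>
#include <drm/ivpu_accel.h>

int main(void)
{
        struct drm_ivpu_param args = {
                .param = DRM_IVPU_PARAM_CAPABILITIES,
                .index = DRM_IVPU_CAP_DMA_MEMORY_RANGE,
        };
        int fd = open("/dev/accel/accel0", O_RDWR); /* assumed node name */

        if (fd < 0)
                return 1;

        /* ivpu_get_capabilities() fills args.value (1 = supported). */
        if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0)
                printf("DMA memory range supported: %llu\n",
                       (unsigned long long)args.value);

        close(fd);
        return 0;
}
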
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 399dc5dcefd7..9e8c075fe9ef 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -23,6 +23,10 @@
#define DRIVER_DATE "20230117"
#define PCI_DEVICE_ID_MTL 0x7d1d
+#define PCI_DEVICE_ID_LNL 0x643e
+
+#define IVPU_HW_37XX 37
+#define IVPU_HW_40XX 40
#define IVPU_GLOBAL_CONTEXT_MMU_SSID 0
/* SSID 1 is used by the VPU to represent invalid context */
@@ -76,6 +80,7 @@ struct ivpu_wa_table {
bool clear_runtime_mem;
bool d3hot_after_power_off;
bool interrupt_clear_with_0;
+ bool disable_clock_relinquish;
};
struct ivpu_hw_info;
@@ -132,6 +137,7 @@ struct ivpu_file_priv {
extern int ivpu_dbg_mask;
extern u8 ivpu_pll_min_ratio;
extern u8 ivpu_pll_max_ratio;
+extern bool ivpu_disable_mmu_cont_pages;
#define IVPU_TEST_MODE_DISABLED 0
#define IVPU_TEST_MODE_FW_TEST 1
@@ -145,11 +151,6 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
int ivpu_shutdown(struct ivpu_device *vdev);
-static inline bool ivpu_is_mtl(struct ivpu_device *vdev)
-{
- return to_pci_dev(vdev->drm.dev)->device == PCI_DEVICE_ID_MTL;
-}
-
static inline u8 ivpu_revision(struct ivpu_device *vdev)
{
return to_pci_dev(vdev->drm.dev)->revision;
@@ -160,6 +161,19 @@ static inline u16 ivpu_device_id(struct ivpu_device *vdev)
return to_pci_dev(vdev->drm.dev)->device;
}
+static inline int ivpu_hw_gen(struct ivpu_device *vdev)
+{
+ switch (ivpu_device_id(vdev)) {
+ case PCI_DEVICE_ID_MTL:
+ return IVPU_HW_37XX;
+ case PCI_DEVICE_ID_LNL:
+ return IVPU_HW_40XX;
+ default:
+ ivpu_err(vdev, "Unknown VPU device\n");
+ return 0;
+ }
+}
+
static inline struct ivpu_device *to_ivpu_device(struct drm_device *dev)
{
return container_of(dev, struct ivpu_device, drm);
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index f58951a0d81b..9827ea4d7b83 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -11,6 +11,7 @@
#include "vpu_boot_api.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
@@ -42,22 +43,39 @@ static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
+/* TODO: Remove mtl_vpu.bin from names after transition to generation-based FW names */
+static struct {
+ int gen;
+ const char *name;
+} fw_names[] = {
+ { IVPU_HW_37XX, "vpu_37xx.bin" },
+ { IVPU_HW_37XX, "mtl_vpu.bin" },
+ { IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
+ { IVPU_HW_40XX, "vpu_40xx.bin" },
+ { IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
+};
+
static int ivpu_fw_request(struct ivpu_device *vdev)
{
- static const char * const fw_names[] = {
- "mtl_vpu.bin",
- "intel/vpu/mtl_vpu_v0.0.bin"
- };
int ret = -ENOENT;
int i;
- if (ivpu_firmware)
- return request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev);
+ if (ivpu_firmware) {
+ ret = request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev);
+ if (!ret)
+ vdev->fw->name = ivpu_firmware;
+ return ret;
+ }
for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
- ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i], vdev->drm.dev);
- if (!ret)
+ if (fw_names[i].gen != ivpu_hw_gen(vdev))
+ continue;
+
+ ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev);
+ if (!ret) {
+ vdev->fw->name = fw_names[i].name;
return 0;
+ }
}
ivpu_err(vdev, "Failed to request firmware: %d\n", ret);
@@ -142,7 +160,9 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
}
ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
fw_hdr->header_version, fw_hdr->image_format);
- ivpu_dbg(vdev, FW_BOOT, "FW version: %s\n", (char *)fw_hdr + VPU_FW_HEADER_SIZE);
+
+ ivpu_info(vdev, "Firmware: %s, version: %s\n", fw->name,
+ (const char *)fw_hdr + VPU_FW_HEADER_SIZE);
if (IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3))
return -EINVAL;
@@ -158,6 +178,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->cold_boot_entry_point = fw_hdr->entry_point;
fw->entry_point = fw->cold_boot_entry_point;
+ fw->trace_level = min_t(u32, ivpu_log_level, IVPU_FW_LOG_FATAL);
+ fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
+ fw->trace_hw_component_mask = -1;
+
ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
@@ -182,13 +206,14 @@ static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
return -EINVAL;
}
- ivpu_hw_init_range(&vdev->hw->ranges.global_low, start, size);
+ ivpu_hw_init_range(&vdev->hw->ranges.global, start, size);
return 0;
}
static int ivpu_fw_mem_init(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
+ int log_verb_size;
int ret;
ret = ivpu_fw_update_global_range(vdev);
@@ -201,17 +226,45 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
return -ENOMEM;
}
+ fw->mem_log_crit = ivpu_bo_alloc_internal(vdev, 0, IVPU_FW_CRITICAL_BUFFER_SIZE,
+ DRM_IVPU_BO_CACHED);
+ if (!fw->mem_log_crit) {
+ ivpu_err(vdev, "Failed to allocate critical log buffer\n");
+ ret = -ENOMEM;
+ goto err_free_fw_mem;
+ }
+
+ if (ivpu_log_level <= IVPU_FW_LOG_INFO)
+ log_verb_size = IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE;
+ else
+ log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;
+
+ fw->mem_log_verb = ivpu_bo_alloc_internal(vdev, 0, log_verb_size, DRM_IVPU_BO_CACHED);
+ if (!fw->mem_log_verb) {
+ ivpu_err(vdev, "Failed to allocate verbose log buffer\n");
+ ret = -ENOMEM;
+ goto err_free_log_crit;
+ }
+
if (fw->shave_nn_size) {
- fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.global_high.start,
+ fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
if (!fw->mem_shave_nn) {
ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
- ivpu_bo_free_internal(fw->mem);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_log_verb;
}
}
return 0;
+
+err_free_log_verb:
+ ivpu_bo_free_internal(fw->mem_log_verb);
+err_free_log_crit:
+ ivpu_bo_free_internal(fw->mem_log_crit);
+err_free_fw_mem:
+ ivpu_bo_free_internal(fw->mem);
+ return ret;
}
static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
@@ -223,7 +276,12 @@ static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
fw->mem_shave_nn = NULL;
}
+ ivpu_bo_free_internal(fw->mem_log_verb);
+ ivpu_bo_free_internal(fw->mem_log_crit);
ivpu_bo_free_internal(fw->mem);
+
+ fw->mem_log_verb = NULL;
+ fw->mem_log_crit = NULL;
fw->mem = NULL;
}
@@ -387,9 +445,9 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
* Uncached region of VPU address space, covers IPC buffers, job queues
* and log buffers, programmable to L2$ Uncached by VPU MTRR
*/
- boot_params->shared_region_base = vdev->hw->ranges.global_low.start;
- boot_params->shared_region_size = vdev->hw->ranges.global_low.end -
- vdev->hw->ranges.global_low.start;
+ boot_params->shared_region_base = vdev->hw->ranges.global.start;
+ boot_params->shared_region_size = vdev->hw->ranges.global.end -
+ vdev->hw->ranges.global.start;
boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;
@@ -397,10 +455,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;
- boot_params->global_aliased_pio_base =
- vdev->hw->ranges.global_aliased_pio.start;
- boot_params->global_aliased_pio_size =
- ivpu_hw_range_size(&vdev->hw->ranges.global_aliased_pio);
+ boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
+ boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
/* Allow configuration for L2C_PAGE_TABLE with boot param value */
boot_params->autoconfig = 1;
@@ -408,7 +464,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
/* Enable L2 cache for first 2GB of high memory */
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
- ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.global_high.start);
+ ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.shave.start);
if (vdev->fw->mem_shave_nn)
boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;
@@ -424,6 +480,15 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->pn_freq_pll_ratio = vdev->hw->pll.pn_ratio;
boot_params->max_freq_pll_ratio = vdev->hw->pll.max_ratio;
+ boot_params->default_trace_level = vdev->fw->trace_level;
+ boot_params->tracing_buff_message_format_mask = BIT(VPU_TRACING_FORMAT_STRING);
+ boot_params->trace_destination_mask = vdev->fw->trace_destination_mask;
+ boot_params->trace_hw_component_mask = vdev->fw->trace_hw_component_mask;
+ boot_params->crit_tracing_buff_addr = vdev->fw->mem_log_crit->vpu_addr;
+ boot_params->crit_tracing_buff_size = vdev->fw->mem_log_crit->base.size;
+ boot_params->verbose_tracing_buff_addr = vdev->fw->mem_log_verb->vpu_addr;
+ boot_params->verbose_tracing_buff_size = vdev->fw->mem_log_verb->base.size;
+
boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
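
The ivpu_fw_mem_init() hunk above replaces the ad-hoc cleanup with the usual kernel goto-unwind ladder: each allocation gets a label that frees everything allocated before it, so every error path releases resources in reverse order exactly once. A generic, self-contained sketch of that shape (the names here are illustrative, not driver API):

#include <linux/slab.h>

struct example {
        void *first;
        void *second;
        void *third;
};

static int example_init(struct example *e)
{
        e->first = kzalloc(4096, GFP_KERNEL);
        if (!e->first)
                return -ENOMEM;

        e->second = kzalloc(4096, GFP_KERNEL);
        if (!e->second)
                goto err_free_first;

        e->third = kzalloc(4096, GFP_KERNEL);
        if (!e->third)
                goto err_free_second;

        return 0;

err_free_second:
        kfree(e->second);
err_free_first:
        kfree(e->first);
        return -ENOMEM;
}
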
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 8d275c802d1c..8567fdf925fe 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -12,6 +12,7 @@ struct vpu_boot_params;
struct ivpu_fw_info {
const struct firmware *file;
+ const char *name;
struct ivpu_bo *mem;
struct ivpu_bo *mem_shave_nn;
struct ivpu_bo *mem_log_crit;
@@ -23,6 +24,9 @@ struct ivpu_fw_info {
u32 shave_nn_size;
u64 entry_point; /* Cold or warm boot entry point for next boot */
u64 cold_boot_entry_point;
+ u32 trace_level;
+ u32 trace_destination_mask;
+ u64 trace_hw_component_mask;
};
int ivpu_fw_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_fw_log.c b/drivers/accel/ivpu/ivpu_fw_log.c
new file mode 100644
index 000000000000..95065cac9fbd
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_fw_log.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/ctype.h>
+#include <linux/highmem.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/moduleparam.h>
+
+#include "vpu_boot_api.h"
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
+#include "ivpu_gem.h"
+
+#define IVPU_FW_LOG_LINE_LENGTH 256
+
+unsigned int ivpu_log_level = IVPU_FW_LOG_ERROR;
+module_param(ivpu_log_level, uint, 0444);
+MODULE_PARM_DESC(ivpu_log_level,
+ "VPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
+ " info=" __stringify(IVPU_FW_LOG_INFO)
+ " warn=" __stringify(IVPU_FW_LOG_WARN)
+ " error=" __stringify(IVPU_FW_LOG_ERROR)
+ " fatal=" __stringify(IVPU_FW_LOG_FATAL));
+
+static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
+ struct vpu_tracing_buffer_header **log_header)
+{
+ struct vpu_tracing_buffer_header *log;
+
+ if ((*offset + sizeof(*log)) > bo->base.size)
+ return -EINVAL;
+
+ log = bo->kvaddr + *offset;
+
+ if (log->vpu_canary_start != VPU_TRACING_BUFFER_CANARY)
+ return -EINVAL;
+
+ if (log->header_size < sizeof(*log) || log->header_size > 1024) {
+ ivpu_dbg(vdev, FW_BOOT, "Invalid header size 0x%x\n", log->header_size);
+ return -EINVAL;
+ }
+ if ((char *)log + log->size > (char *)bo->kvaddr + bo->base.size) {
+ ivpu_dbg(vdev, FW_BOOT, "Invalid log size 0x%x\n", log->size);
+ return -EINVAL;
+ }
+
+ *log_header = log;
+ *offset += log->size;
+
+ ivpu_dbg(vdev, FW_BOOT,
+ "FW log name \"%s\", write offset 0x%x size 0x%x, wrap count %d, hdr version %d size %d format %d, alignment %d",
+ log->name, log->write_index, log->size, log->wrap_count, log->header_version,
+ log->header_size, log->format, log->alignment);
+
+ return 0;
+}
+
+static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
+{
+ char line[IVPU_FW_LOG_LINE_LENGTH];
+ u32 index = 0;
+
+ if (!size || !buffer)
+ return;
+
+ while (size--) {
+ if (*buffer == '\n' || *buffer == 0) {
+ line[index] = 0;
+ if (index != 0)
+ drm_printf(p, "%s\n", line);
+ index = 0;
+ buffer++;
+ continue;
+ }
+ if (index == IVPU_FW_LOG_LINE_LENGTH - 1) {
+ line[index] = 0;
+ index = 0;
+ drm_printf(p, "%s\n", line);
+ }
+ if (*buffer != '\r' && (isprint(*buffer) || iscntrl(*buffer)))
+ line[index++] = *buffer;
+ buffer++;
+ }
+ line[index] = 0;
+ if (index != 0)
+ drm_printf(p, "%s\n", line);
+}
+
+static void fw_log_print_buffer(struct ivpu_device *vdev, struct vpu_tracing_buffer_header *log,
+ const char *prefix, bool only_new_msgs, struct drm_printer *p)
+{
+ char *log_buffer = (void *)log + log->header_size;
+ u32 log_size = log->size - log->header_size;
+ u32 log_start = log->read_index;
+ u32 log_end = log->write_index;
+
+ if (!(log->write_index || log->wrap_count) ||
+ (log->write_index == log->read_index && only_new_msgs)) {
+ drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
+ return;
+ }
+
+ drm_printf(p, "==== %s \"%s\" log start ====\n", prefix, log->name);
+ if (log->write_index > log->read_index) {
+ buffer_print(log_buffer + log_start, log_end - log_start, p);
+ } else {
+ buffer_print(log_buffer + log_end, log_size - log_end, p);
+ buffer_print(log_buffer, log_end, p);
+ }
+ drm_printf(p, "\x1b[0m");
+ drm_printf(p, "==== %s \"%s\" log end ====\n", prefix, log->name);
+}
+
+void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
+{
+ struct vpu_tracing_buffer_header *log_header;
+ u32 next = 0;
+
+ while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
+ fw_log_print_buffer(vdev, log_header, "VPU critical", only_new_msgs, p);
+
+ next = 0;
+ while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
+ fw_log_print_buffer(vdev, log_header, "VPU verbose", only_new_msgs, p);
+}
+
+void ivpu_fw_log_clear(struct ivpu_device *vdev)
+{
+ struct vpu_tracing_buffer_header *log_header;
+ u32 next = 0;
+
+ while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
+ log_header->read_index = log_header->write_index;
+
+ next = 0;
+ while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
+ log_header->read_index = log_header->write_index;
+}
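
A standalone sketch of the wrap handling used by fw_log_print_buffer() above: once the writer has wrapped, the oldest bytes sit just after the write index, so the buffer is emitted as two segments. This is an illustration, not driver code:

#include <stddef.h>
#include <stdio.h>

static void dump_ring(const char *buf, size_t size,
                      size_t read_index, size_t write_index, int wrapped)
{
        if (!wrapped && write_index > read_index) {
                /* Linear case: one contiguous segment. */
                fwrite(buf + read_index, 1, write_index - read_index, stdout);
        } else if (wrapped) {
                /* Wrapped case: tail first, then the head up to the writer. */
                fwrite(buf + write_index, 1, size - write_index, stdout);
                fwrite(buf, 1, write_index, stdout);
        }
}

int main(void)
{
        char ring[8] = { 'E', 'F', 'G', 'H', 'A', 'B', 'C', 'D' };

        /* Writer wrapped at offset 4: logical order is ABCDEFGH. */
        dump_ring(ring, sizeof(ring), 0, 4, 1);
        putchar('\n');
        return 0;
}
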
diff --git a/drivers/accel/ivpu/ivpu_fw_log.h b/drivers/accel/ivpu/ivpu_fw_log.h
new file mode 100644
index 000000000000..0b2573f6f315
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_fw_log.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_FW_LOG_H__
+#define __IVPU_FW_LOG_H__
+
+#include <linux/types.h>
+
+#include <drm/drm_print.h>
+
+#include "ivpu_drv.h"
+
+#define IVPU_FW_LOG_DEFAULT 0
+#define IVPU_FW_LOG_DEBUG 1
+#define IVPU_FW_LOG_INFO 2
+#define IVPU_FW_LOG_WARN 3
+#define IVPU_FW_LOG_ERROR 4
+#define IVPU_FW_LOG_FATAL 5
+
+extern unsigned int ivpu_log_level;
+
+#define IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE SZ_1M
+#define IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE SZ_8M
+#define IVPU_FW_CRITICAL_BUFFER_SIZE SZ_512K
+
+void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
+void ivpu_fw_log_clear(struct ivpu_device *vdev);
+
+static inline void ivpu_fw_log_dump(struct ivpu_device *vdev)
+{
+ struct drm_printer p = drm_info_printer(vdev->drm.dev);
+
+ ivpu_fw_log_print(vdev, false, &p);
+}
+
+#endif /* __IVPU_FW_LOG_H__ */
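
The MODULE_PARM_DESC in ivpu_fw_log.c splices these numeric levels into the description string with the kernel's __stringify() (include/linux/stringify.h). A compile-time sketch of that two-level expansion trick, mirroring the kernel macro:

/* Two-level expansion stringifies the macro's value, not its name. */
#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)

#define IVPU_FW_LOG_INFO 2

/* Expands to "info=2" at compile time. */
static const char desc[] = "info=" __stringify(IVPU_FW_LOG_INFO);
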
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 9967fcfa27ec..d09f13b35902 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -282,10 +282,12 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
int ret;
if (!range) {
- if (bo->flags & DRM_IVPU_BO_HIGH_MEM)
- range = &vdev->hw->ranges.user_high;
+ if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
+ range = &vdev->hw->ranges.shave;
+ else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
+ range = &vdev->hw->ranges.dma;
else
- range = &vdev->hw->ranges.user_low;
+ range = &vdev->hw->ranges.user;
}
mutex_lock(&ctx->lock);
@@ -573,7 +575,7 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
fixed_range.end = vpu_addr + size;
range = &fixed_range;
} else {
- range = &vdev->hw->ranges.global_low;
+ range = &vdev->hw->ranges.global;
}
bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index 50a9304ab09c..ab341237bcf9 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -38,11 +38,10 @@ struct ivpu_addr_range {
struct ivpu_hw_info {
const struct ivpu_hw_ops *ops;
struct {
- struct ivpu_addr_range global_low;
- struct ivpu_addr_range global_high;
- struct ivpu_addr_range user_low;
- struct ivpu_addr_range user_high;
- struct ivpu_addr_range global_aliased_pio;
+ struct ivpu_addr_range global;
+ struct ivpu_addr_range user;
+ struct ivpu_addr_range shave;
+ struct ivpu_addr_range dma;
} ranges;
struct {
u8 min_ratio;
@@ -57,9 +56,11 @@ struct ivpu_hw_info {
u32 tile_fuse;
u32 sku;
u16 config;
+ int dma_bits;
};
-extern const struct ivpu_hw_ops ivpu_hw_mtl_ops;
+extern const struct ivpu_hw_ops ivpu_hw_37xx_ops;
+extern const struct ivpu_hw_ops ivpu_hw_40xx_ops;
static inline int ivpu_hw_info_init(struct ivpu_device *vdev)
{
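
With two ops tables exported, ivpu_dev_init() picks one at probe time based on ivpu_hw_gen(), and every hardware access goes through that single indirection. A minimal sketch of the pattern; the names are illustrative and do not reflect the real struct ivpu_hw_ops layout:

struct hw_ops {
        int (*power_up)(void *ctx);
        int (*power_down)(void *ctx);
};

static int gen37_power_up(void *ctx)   { return 0; }
static int gen37_power_down(void *ctx) { return 0; }

static const struct hw_ops gen37_ops = {
        .power_up   = gen37_power_up,
        .power_down = gen37_power_down,
};

/* Selected once at probe time, then used everywhere. */
static int hw_power_up(const struct hw_ops *ops, void *ctx)
{
        return ops->power_up(ctx);
}
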
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 2a5dd3a5dc46..9eae1c241bc0 100644
--- a/drivers/accel/ivpu/ivpu_hw_mtl.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -5,7 +5,7 @@
#include "ivpu_drv.h"
#include "ivpu_fw.h"
-#include "ivpu_hw_mtl_reg.h"
+#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
@@ -39,34 +39,34 @@
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US (500 * USEC_PER_MSEC)
-#define ICB_0_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
+#define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
-#define ICB_1_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
+#define ICB_1_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
-#define BUTTRESS_IRQ_MASK ((REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
- (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
- (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
+#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
+ (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+ (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
-#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
- (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
- (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
- (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
- (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
- (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
- (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
+#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
static char *ivpu_platform_to_str(u32 platform)
{
@@ -84,8 +84,8 @@ static char *ivpu_platform_to_str(u32 platform)
static void ivpu_hw_read_platform(struct ivpu_device *vdev)
{
- u32 gen_ctrl = REGV_RD32(MTL_VPU_HOST_SS_GEN_CTRL);
- u32 platform = REG_GET_FLD(MTL_VPU_HOST_SS_GEN_CTRL, PS, gen_ctrl);
+ u32 gen_ctrl = REGV_RD32(VPU_37XX_HOST_SS_GEN_CTRL);
+ u32 platform = REG_GET_FLD(VPU_37XX_HOST_SS_GEN_CTRL, PS, gen_ctrl);
if (platform == IVPU_PLATFORM_SIMICS || platform == IVPU_PLATFORM_FPGA)
vdev->platform = platform;
@@ -123,7 +123,7 @@ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
{
- return REGB_POLL_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
+ return REGB_POLL_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}
/* Send KMD initiated workpoint change */
@@ -139,23 +139,23 @@ static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ra
return ret;
}
- val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD0);
- val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
- val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
- REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD0, val);
+ val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0);
+ val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
+ val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, val);
- val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD1);
- val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
- val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
- REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD1, val);
+ val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1);
+ val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
+ val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, val);
- val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD2);
- val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
- REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD2, val);
+ val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2);
+ val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, val);
- val = REGB_RD32(MTL_BUTTRESS_WP_REQ_CMD);
- val = REG_SET_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, val);
- REGB_WR32(MTL_BUTTRESS_WP_REQ_CMD, val);
+ val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_CMD);
+ val = REG_SET_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_CMD, val);
ret = ivpu_pll_wait_for_cmd_send(vdev);
if (ret)
@@ -171,7 +171,7 @@ static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
if (IVPU_WA(punit_disabled))
return 0;
- return REGB_POLL_FLD(MTL_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
+ return REGB_POLL_FLD(VPU_37XX_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}
static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
@@ -179,7 +179,7 @@ static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
if (IVPU_WA(punit_disabled))
return 0;
- return REGB_POLL_FLD(MTL_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
+ return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}
static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
@@ -188,21 +188,21 @@ static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
u32 fmin_fuse, fmax_fuse;
- fmin_fuse = REGB_RD32(MTL_BUTTRESS_FMIN_FUSE);
- fuse_min_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
- fuse_pn_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
+ fmin_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMIN_FUSE);
+ fuse_min_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
+ fuse_pn_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
- fmax_fuse = REGB_RD32(MTL_BUTTRESS_FMAX_FUSE);
- fuse_max_ratio = REG_GET_FLD(MTL_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
+ fmax_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMAX_FUSE);
+ fuse_max_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}
-static int ivpu_hw_mtl_wait_for_vpuip_bar(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_wait_for_vpuip_bar(struct ivpu_device *vdev)
{
- return REGV_POLL_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, AON, 0, 100);
+ return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}
static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
@@ -248,7 +248,7 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
return ret;
}
- ret = ivpu_hw_mtl_wait_for_vpuip_bar(vdev);
+ ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev);
if (ret) {
ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
return ret;
@@ -272,52 +272,52 @@ static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
{
u32 val = 0;
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
- REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_CLR, val);
+ REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
}
static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_SET);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);
if (enable) {
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
} else {
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
}
- REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_SET, val);
+ REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
}
static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_CLK_SET);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);
if (enable) {
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
} else {
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
}
- REGV_WR32(MTL_VPU_HOST_SS_CPR_CLK_SET, val);
+ REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
}
static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
- if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
+ if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
@@ -325,9 +325,9 @@ static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QACCEPTN);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);
- if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
+ if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
@@ -335,9 +335,9 @@ static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QDENY);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);
- if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
+ if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
@@ -385,7 +385,7 @@ static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
{
- REGV_WR32(MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
+ REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
}
static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
@@ -393,12 +393,12 @@ static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
int ret;
u32 val;
- val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
+ val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
if (enable)
- val = REG_SET_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
else
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
- REGV_WR32(MTL_VPU_HOST_SS_NOC_QREQN, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);
ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
if (ret) {
@@ -453,26 +453,26 @@ static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
if (enable)
- val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
else
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
- REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
+ REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}
static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);
if (enable)
- val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
else
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
- REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, val);
+ REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
@@ -481,32 +481,32 @@ static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 ex
if (ivpu_is_fpga(vdev))
return 0;
- return REGV_POLL_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
+ return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
}
static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);
if (enable)
- val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
else
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
- REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, val);
+ REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
}
static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);
if (enable)
- val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
else
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
- REGV_WR32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, val);
+ REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}
static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
@@ -538,36 +538,25 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES);
+ u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
- REGV_WR32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, val);
+ REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_IF_TBU_MMUSSIDV);
+ u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);
- if (ivpu_is_fpga(vdev)) {
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
- } else {
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_AWMMUSSIDV, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_ARMMUSSIDV, val);
- }
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
- REGV_WR32(MTL_VPU_HOST_IF_TBU_MMUSSIDV, val);
+ REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
}
static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
@@ -587,10 +576,10 @@ static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
val = vdev->fw->entry_point >> 9;
- REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
+ REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
- val = REG_SET_FLD(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
- REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
+ REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
@@ -601,27 +590,27 @@ static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
int ret;
u32 val;
- ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
return ret;
}
- val = REGB_RD32(MTL_BUTTRESS_VPU_D0I3_CONTROL);
+ val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL);
if (enable)
- val = REG_SET_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
+ val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
else
- val = REG_CLR_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
- REGB_WR32(MTL_BUTTRESS_VPU_D0I3_CONTROL, val);
+ val = REG_CLR_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, val);
- ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
return ret;
}
-static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
@@ -631,16 +620,15 @@ static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
ivpu_pll_init_frequency_ratios(vdev);
- ivpu_hw_init_range(&hw->ranges.global_low, 0x80000000, SZ_512M);
- ivpu_hw_init_range(&hw->ranges.global_high, 0x180000000, SZ_2M);
- ivpu_hw_init_range(&hw->ranges.user_low, 0xc0000000, 255 * SZ_1M);
- ivpu_hw_init_range(&hw->ranges.user_high, 0x180000000, SZ_2G);
- hw->ranges.global_aliased_pio = hw->ranges.user_low;
+ ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M);
+ ivpu_hw_init_range(&hw->ranges.user, 0xc0000000, 255 * SZ_1M);
+ ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
+ ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G);
return 0;
}
-static int ivpu_hw_mtl_reset(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
int ret;
u32 val;
@@ -648,24 +636,24 @@ static int ivpu_hw_mtl_reset(struct ivpu_device *vdev)
if (IVPU_WA(punit_disabled))
return 0;
- ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
return ret;
}
- val = REGB_RD32(MTL_BUTTRESS_VPU_IP_RESET);
- val = REG_SET_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
- REGB_WR32(MTL_BUTTRESS_VPU_IP_RESET, val);
+ val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
+ val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
- ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Timed out waiting for RESET completion\n");
return ret;
}
-static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_d0i3_enable(struct ivpu_device *vdev)
{
int ret;
@@ -678,7 +666,7 @@ static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev)
return ret;
}
-static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_d0i3_disable(struct ivpu_device *vdev)
{
int ret;
@@ -689,7 +677,7 @@ static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev)
return ret;
}
-static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
int ret;
@@ -697,11 +685,11 @@ static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
ivpu_hw_wa_init(vdev);
ivpu_hw_timeouts_init(vdev);
- ret = ivpu_hw_mtl_reset(vdev);
+ ret = ivpu_hw_37xx_reset(vdev);
if (ret)
ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
- ret = ivpu_hw_mtl_d0i3_disable(vdev);
+ ret = ivpu_hw_37xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
@@ -743,7 +731,7 @@ static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
return ret;
}
-static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_boot_fw(struct ivpu_device *vdev)
{
ivpu_boot_no_snoop_enable(vdev);
ivpu_boot_tbu_mmu_enable(vdev);
@@ -752,32 +740,31 @@ static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
return 0;
}
-static bool ivpu_hw_mtl_is_idle(struct ivpu_device *vdev)
+static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev)
{
u32 val;
if (IVPU_WA(punit_disabled))
return true;
- val = REGB_RD32(MTL_BUTTRESS_VPU_STATUS);
- return REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, READY, val) &&
- REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, IDLE, val);
+ val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_STATUS);
+ return REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, val) &&
+ REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val);
}
-static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
{
int ret = 0;
- if (!ivpu_hw_mtl_is_idle(vdev) && ivpu_hw_mtl_reset(vdev)) {
+ if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev))
ivpu_err(vdev, "Failed to reset the VPU\n");
- }
if (ivpu_pll_disable(vdev)) {
ivpu_err(vdev, "Failed to disable PLL\n");
ret = -EIO;
}
- if (ivpu_hw_mtl_d0i3_enable(vdev)) {
+ if (ivpu_hw_37xx_d0i3_enable(vdev)) {
ivpu_err(vdev, "Failed to enter D0I3\n");
ret = -EIO;
}
@@ -785,7 +772,7 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
return ret;
}
-static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
{
u32 val;
@@ -803,7 +790,7 @@ static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
}
-static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
+static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
@@ -817,35 +804,35 @@ static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
}
/* Register indirect accesses */
-static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
{
u32 pll_curr_ratio;
- pll_curr_ratio = REGB_RD32(MTL_BUTTRESS_CURRENT_PLL);
- pll_curr_ratio &= MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK;
+ pll_curr_ratio = REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL);
+ pll_curr_ratio &= VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK;
if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ;
- return ivpu_hw_mtl_pll_to_freq(pll_curr_ratio, vdev->hw->config);
+ return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
}
-static u32 ivpu_hw_mtl_reg_telemetry_offset_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
- return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_OFFSET);
+ return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
}
-static u32 ivpu_hw_mtl_reg_telemetry_size_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_telemetry_size_get(struct ivpu_device *vdev)
{
- return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_SIZE);
+ return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE);
}
-static u32 ivpu_hw_mtl_reg_telemetry_enable_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
{
- return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_ENABLE);
+ return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
}
-static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id)
+static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
{
u32 reg_stride = MTL_VPU_CPU_SS_DOORBELL_1 - MTL_VPU_CPU_SS_DOORBELL_0;
u32 val = REG_FLD(MTL_VPU_CPU_SS_DOORBELL_0, SET);
@@ -853,52 +840,52 @@ static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id)
REGV_WR32I(MTL_VPU_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}
-static u32 ivpu_hw_mtl_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
{
- return REGV_RD32(MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM);
+ return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
}
-static u32 ivpu_hw_mtl_reg_ipc_rx_count_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
{
- u32 count = REGV_RD32_SILENT(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT);
+ u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
- return REG_GET_FLD(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
+ return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
-static void ivpu_hw_mtl_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
+static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
REGV_WR32(MTL_VPU_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}
-static void ivpu_hw_mtl_irq_clear(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev)
{
- REGV_WR64(MTL_VPU_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
+ REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
}
-static void ivpu_hw_mtl_irq_enable(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_enable(struct ivpu_device *vdev)
{
- REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
- REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
- REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
- REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+ REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
+ REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
+ REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
+ REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
}
-static void ivpu_hw_mtl_irq_disable(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
{
- REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
- REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
- REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, 0x0ull);
- REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
+ REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+ REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
+ REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
+ REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
}
-static void ivpu_hw_mtl_irq_wdt_nce_handler(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
ivpu_pm_schedule_recovery(vdev);
}
-static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
@@ -906,7 +893,7 @@ static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev)
ivpu_pm_schedule_recovery(vdev);
}
-static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
@@ -914,65 +901,66 @@ static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev)
}
/* Handler for IRQs from VPU core (irqV) */
-static u32 ivpu_hw_mtl_irqv_handler(struct ivpu_device *vdev, int irq)
+static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq)
{
- u32 status = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+ u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
- REGV_WR32(MTL_VPU_HOST_SS_ICB_CLEAR_0, status);
+ REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
ivpu_mmu_irq_evtq_handler(vdev);
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
ivpu_ipc_irq_handler(vdev);
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
ivpu_mmu_irq_gerr_handler(vdev);
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
- ivpu_hw_mtl_irq_wdt_mss_handler(vdev);
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
+ ivpu_hw_37xx_irq_wdt_mss_handler(vdev);
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
- ivpu_hw_mtl_irq_wdt_nce_handler(vdev);
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
+ ivpu_hw_37xx_irq_wdt_nce_handler(vdev);
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
- ivpu_hw_mtl_irq_noc_firewall_handler(vdev);
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
+ ivpu_hw_37xx_irq_noc_firewall_handler(vdev);
return status;
}
/* Handler for IRQs from Buttress core (irqB) */
-static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
+static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
- u32 status = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+ u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
bool schedule_recovery = false;
if (status == 0)
return 0;
/* Disable global interrupt before handling local buttress interrupts */
- REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+ REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
- if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
- ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(MTL_BUTTRESS_CURRENT_PLL));
+ if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
+ ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
+ REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));
- if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
- ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
- REGB_WR32(MTL_BUTTRESS_ATS_ERR_CLEAR, 0x1);
+ if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
+ ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
+ REGB_WR32(VPU_37XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
- if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
- u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
+ if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
+ u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
- ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
- REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
- REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
- REGB_WR32(MTL_BUTTRESS_UFI_ERR_CLEAR, 0x1);
+ ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
+ REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
+ REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
+ REGB_WR32(VPU_37XX_BUTTRESS_UFI_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
@@ -982,12 +970,12 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
* Writing 1 triggers an interrupt, so we can't perform read update write.
* Clear local interrupt status by writing 0 to all bits.
*/
- REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
+ REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
else
- REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, status);
+ REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
/* Re-enable global interrupt */
- REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+ REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
if (schedule_recovery)
ivpu_pm_schedule_recovery(vdev);
@@ -995,65 +983,65 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
return status;
}
-static irqreturn_t ivpu_hw_mtl_irq_handler(int irq, void *ptr)
+static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
{
struct ivpu_device *vdev = ptr;
u32 ret_irqv, ret_irqb;
- ret_irqv = ivpu_hw_mtl_irqv_handler(vdev, irq);
- ret_irqb = ivpu_hw_mtl_irqb_handler(vdev, irq);
+ ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
+ ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);
return IRQ_RETVAL(ret_irqb | ret_irqv);
}
-static void ivpu_hw_mtl_diagnose_failure(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
{
- u32 irqv = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
- u32 irqb = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+ u32 irqv = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+ u32 irqb = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
- if (ivpu_hw_mtl_reg_ipc_rx_count_get(vdev))
+ if (ivpu_hw_37xx_reg_ipc_rx_count_get(vdev))
ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
ivpu_err(vdev, "WDT MSS timeout detected\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
ivpu_err(vdev, "WDT NCE timeout detected\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
ivpu_err(vdev, "NOC Firewall irq detected\n");
- if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
- ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
+ if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
+ ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
- if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
- u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
+ if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
+ u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
- ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
- REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
- REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
+ ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
+ REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
+ REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
}
}
-const struct ivpu_hw_ops ivpu_hw_mtl_ops = {
- .info_init = ivpu_hw_mtl_info_init,
- .power_up = ivpu_hw_mtl_power_up,
- .is_idle = ivpu_hw_mtl_is_idle,
- .power_down = ivpu_hw_mtl_power_down,
- .boot_fw = ivpu_hw_mtl_boot_fw,
- .wdt_disable = ivpu_hw_mtl_wdt_disable,
- .diagnose_failure = ivpu_hw_mtl_diagnose_failure,
- .reg_pll_freq_get = ivpu_hw_mtl_reg_pll_freq_get,
- .reg_telemetry_offset_get = ivpu_hw_mtl_reg_telemetry_offset_get,
- .reg_telemetry_size_get = ivpu_hw_mtl_reg_telemetry_size_get,
- .reg_telemetry_enable_get = ivpu_hw_mtl_reg_telemetry_enable_get,
- .reg_db_set = ivpu_hw_mtl_reg_db_set,
- .reg_ipc_rx_addr_get = ivpu_hw_mtl_reg_ipc_rx_addr_get,
- .reg_ipc_rx_count_get = ivpu_hw_mtl_reg_ipc_rx_count_get,
- .reg_ipc_tx_set = ivpu_hw_mtl_reg_ipc_tx_set,
- .irq_clear = ivpu_hw_mtl_irq_clear,
- .irq_enable = ivpu_hw_mtl_irq_enable,
- .irq_disable = ivpu_hw_mtl_irq_disable,
- .irq_handler = ivpu_hw_mtl_irq_handler,
+const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
+ .info_init = ivpu_hw_37xx_info_init,
+ .power_up = ivpu_hw_37xx_power_up,
+ .is_idle = ivpu_hw_37xx_is_idle,
+ .power_down = ivpu_hw_37xx_power_down,
+ .boot_fw = ivpu_hw_37xx_boot_fw,
+ .wdt_disable = ivpu_hw_37xx_wdt_disable,
+ .diagnose_failure = ivpu_hw_37xx_diagnose_failure,
+ .reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
+ .reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
+ .reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
+ .reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
+ .reg_db_set = ivpu_hw_37xx_reg_db_set,
+ .reg_ipc_rx_addr_get = ivpu_hw_37xx_reg_ipc_rx_addr_get,
+ .reg_ipc_rx_count_get = ivpu_hw_37xx_reg_ipc_rx_count_get,
+ .reg_ipc_tx_set = ivpu_hw_37xx_reg_ipc_tx_set,
+ .irq_clear = ivpu_hw_37xx_irq_clear,
+ .irq_enable = ivpu_hw_37xx_irq_enable,
+ .irq_disable = ivpu_hw_37xx_irq_disable,
+ .irq_handler = ivpu_hw_37xx_irq_handler,
};
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx_reg.h b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
new file mode 100644
index 000000000000..6e4e915948f9
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
@@ -0,0 +1,281 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_37XX_REG_H__
+#define __IVPU_HW_37XX_REG_H__
+
+#include <linux/bits.h>
+#include <linux/sizes.h>
+
+#define VPU_37XX_BUTTRESS_INTERRUPT_TYPE 0x00000000u
+
+#define VPU_37XX_BUTTRESS_INTERRUPT_STAT 0x00000004u
+#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
+#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
+
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
+
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
+
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
+
+#define VPU_37XX_BUTTRESS_WP_REQ_CMD 0x00000014u
+#define VPU_37XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
+
+#define VPU_37XX_BUTTRESS_WP_DOWNLOAD 0x00000018u
+#define VPU_37XX_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
+
+#define VPU_37XX_BUTTRESS_CURRENT_PLL 0x0000001cu
+#define VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
+
+#define VPU_37XX_BUTTRESS_PLL_ENABLE 0x00000020u
+
+#define VPU_37XX_BUTTRESS_FMIN_FUSE 0x00000024u
+#define VPU_37XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
+#define VPU_37XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
+
+#define VPU_37XX_BUTTRESS_FMAX_FUSE 0x00000028u
+#define VPU_37XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
+
+#define VPU_37XX_BUTTRESS_TILE_FUSE 0x0000002cu
+#define VPU_37XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
+
+#define VPU_37XX_BUTTRESS_LOCAL_INT_MASK 0x00000030u
+#define VPU_37XX_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
+
+#define VPU_37XX_BUTTRESS_PLL_STATUS 0x00000040u
+#define VPU_37XX_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1)
+
+#define VPU_37XX_BUTTRESS_VPU_STATUS 0x00000044u
+#define VPU_37XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
+
+#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
+#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
+
+#define VPU_37XX_BUTTRESS_VPU_IP_RESET 0x00000050u
+#define VPU_37XX_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
+
+#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u
+#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
+#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u
+
+#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u
+#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u
+#define VPU_37XX_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u
+
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG 0x000000b0u
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
+
+#define VPU_37XX_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u
+
+#define VPU_37XX_HOST_SS_CPR_CLK_SET 0x00000084u
+#define VPU_37XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_37XX_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_37XX_HOST_SS_CPR_RST_SET 0x00000094u
+#define VPU_37XX_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_37XX_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_37XX_HOST_SS_CPR_RST_CLR 0x00000098u
+#define VPU_37XX_HOST_SS_CPR_RST_CLR_AON_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_37XX_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_37XX_HOST_SS_HW_VERSION 0x00000108u
+#define VPU_37XX_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0)
+#define VPU_37XX_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8)
+#define VPU_37XX_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16)
+
+#define VPU_37XX_HOST_SS_GEN_CTRL 0x00000118u
+#define VPU_37XX_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29)
+
+#define VPU_37XX_HOST_SS_NOC_QREQN 0x00000154u
+#define VPU_37XX_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define VPU_37XX_HOST_SS_NOC_QACCEPTN 0x00000158u
+#define VPU_37XX_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define VPU_37XX_HOST_SS_NOC_QDENY 0x0000015cu
+#define VPU_37XX_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define MTL_VPU_TOP_NOC_QREQN 0x00000160u
+#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+
+#define MTL_VPU_TOP_NOC_QACCEPTN 0x00000164u
+#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+
+#define MTL_VPU_TOP_NOC_QDENY 0x00000168u
+#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN 0x00000170u
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6)
+
+#define VPU_37XX_HOST_SS_ICB_STATUS_0 0x00010210u
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31)
+
+#define VPU_37XX_HOST_SS_ICB_STATUS_1 0x00010214u
+#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2)
+
+#define VPU_37XX_HOST_SS_ICB_CLEAR_0 0x00010220u
+#define VPU_37XX_HOST_SS_ICB_CLEAR_1 0x00010224u
+#define VPU_37XX_HOST_SS_ICB_ENABLE_0 0x00010240u
+
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u
+
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK GENMASK(7, 0)
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK GENMASK(15, 8)
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK GENMASK(31, 24)
+
+#define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
+#define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN 0x00030200u
+#define VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK BIT_MASK(0)
+
+#define VPU_37XX_HOST_SS_AON_DPU_ACTIVE 0x00030204u
+#define VPU_37XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
+
+#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO 0x00041040u
+#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
+#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3)
+
+#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
+#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
+#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
+
+#define VPU_37XX_HOST_MMU_IDR0 0x00200000u
+#define VPU_37XX_HOST_MMU_IDR1 0x00200004u
+#define VPU_37XX_HOST_MMU_IDR3 0x0020000cu
+#define VPU_37XX_HOST_MMU_IDR5 0x00200014u
+#define VPU_37XX_HOST_MMU_CR0 0x00200020u
+#define VPU_37XX_HOST_MMU_CR0ACK 0x00200024u
+#define VPU_37XX_HOST_MMU_CR1 0x00200028u
+#define VPU_37XX_HOST_MMU_CR2 0x0020002cu
+#define VPU_37XX_HOST_MMU_IRQ_CTRL 0x00200050u
+#define VPU_37XX_HOST_MMU_IRQ_CTRLACK 0x00200054u
+
+#define VPU_37XX_HOST_MMU_GERROR 0x00200060u
+#define VPU_37XX_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
+#define VPU_37XX_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
+#define VPU_37XX_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
+#define VPU_37XX_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
+#define VPU_37XX_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
+#define VPU_37XX_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7)
+
+#define VPU_37XX_HOST_MMU_GERRORN 0x00200064u
+
+#define VPU_37XX_HOST_MMU_STRTAB_BASE 0x00200080u
+#define VPU_37XX_HOST_MMU_STRTAB_BASE_CFG 0x00200088u
+#define VPU_37XX_HOST_MMU_CMDQ_BASE 0x00200090u
+#define VPU_37XX_HOST_MMU_CMDQ_PROD 0x00200098u
+#define VPU_37XX_HOST_MMU_CMDQ_CONS 0x0020009cu
+#define VPU_37XX_HOST_MMU_EVTQ_BASE 0x002000a0u
+#define VPU_37XX_HOST_MMU_EVTQ_PROD 0x002000a8u
+#define VPU_37XX_HOST_MMU_EVTQ_CONS 0x002000acu
+#define VPU_37XX_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
+#define VPU_37XX_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
+
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK BIT_MASK(3)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK BIT_MASK(4)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK BIT_MASK(5)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11)
+
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV 0x00360004u
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
+
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET 0x06010004u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK BIT_MASK(1)
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR 0x06010018u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK BIT_MASK(1)
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC 0x06010040u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK BIT_MASK(0)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK BIT_MASK(1)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK BIT_MASK(2)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK BIT_MASK(3)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK GENMASK(31, 4)
+
+#define MTL_VPU_CPU_SS_TIM_WATCHDOG 0x0602009cu
+#define MTL_VPU_CPU_SS_TIM_WDOG_EN 0x060200a4u
+#define MTL_VPU_CPU_SS_TIM_SAFE 0x060200a8u
+#define MTL_VPU_CPU_SS_TIM_IPC_FIFO 0x060200f0u
+
+#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG 0x06021008u
+#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
+
+#define MTL_VPU_CPU_SS_DOORBELL_0 0x06300000u
+#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
+
+#define MTL_VPU_CPU_SS_DOORBELL_1 0x06301000u
+
+#endif /* __IVPU_HW_37XX_REG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
new file mode 100644
index 000000000000..34626d66fa10
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -0,0 +1,1178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_hw.h"
+#include "ivpu_hw_40xx_reg.h"
+#include "ivpu_hw_reg_io.h"
+#include "ivpu_ipc.h"
+#include "ivpu_mmu.h"
+#include "ivpu_pm.h"
+
+#include <linux/dmi.h>
+
+#define TILE_MAX_NUM 6
+#define TILE_MAX_MASK 0x3f
+
+#define LNL_HW_ID 0x4040
+
+#define SKU_TILE_SHIFT 0u
+#define SKU_TILE_MASK 0x0000ffffu
+#define SKU_HW_ID_SHIFT 16u
+#define SKU_HW_ID_MASK 0xffff0000u
+
+#define PLL_CONFIG_DEFAULT 0x1
+#define PLL_CDYN_DEFAULT 0x80
+#define PLL_EPP_DEFAULT 0x80
+#define PLL_REF_CLK_FREQ (50 * 1000000)
+#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
+
+#define PLL_PROFILING_FREQ_DEFAULT 38400000
+#define PLL_PROFILING_FREQ_HIGH 400000000
+
+#define TIM_SAFE_ENABLE 0xf1d0dead
+#define TIM_WATCHDOG_RESET_VALUE 0xffffffff
+
+#define TIMEOUT_US (150 * USEC_PER_MSEC)
+#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
+#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
+
+#define WEIGHTS_DEFAULT 0xf711f711u
+#define WEIGHTS_ATS_DEFAULT 0x0000f711u
+
+#define ICB_0_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
+
+#define ICB_1_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
+
+#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
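+/*
+ * ICB_STATUS_1 bits occupy the upper 32 bits of this combined mask, matching
+ * the 64-bit REGV_WR64 writes to the ICB clear/enable registers below.
+ */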
+
+#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
+ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR)) | \
+ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR)) | \
+ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR)) | \
+ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR)) | \
+ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR)))
+
+#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
+#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
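+
+/*
+ * Note: assuming set bits in LOCAL_INT_MASK mask interrupts off, the enable
+ * mask is simply the complement of the handled buttress sources, and the
+ * disable mask (all ones) masks everything.
+ */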
+
+#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
+
+static const char *ivpu_platform_to_str(u32 platform)
+{
+ switch (platform) {
+ case IVPU_PLATFORM_SILICON:
+ return "IVPU_PLATFORM_SILICON";
+ case IVPU_PLATFORM_SIMICS:
+ return "IVPU_PLATFORM_SIMICS";
+ case IVPU_PLATFORM_FPGA:
+ return "IVPU_PLATFORM_FPGA";
+ default:
+ return "Invalid platform";
+ }
+}
+
+static const struct dmi_system_id ivpu_dmi_platform_simulation[] = {
+ {
+ .ident = "Intel Simics",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_BOARD_SERIAL, "123456789"),
+ },
+ },
+ {
+ .ident = "Intel Simics",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "Simics"),
+ },
+ },
+ { }
+};
+
+static void ivpu_hw_read_platform(struct ivpu_device *vdev)
+{
+ if (dmi_check_system(ivpu_dmi_platform_simulation))
+ vdev->platform = IVPU_PLATFORM_SIMICS;
+ else
+ vdev->platform = IVPU_PLATFORM_SILICON;
+
+ ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
+ ivpu_platform_to_str(vdev->platform), vdev->platform);
+}
+
+static void ivpu_hw_wa_init(struct ivpu_device *vdev)
+{
+ vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
+ vdev->wa.clear_runtime_mem = false;
+
+ if (ivpu_hw_gen(vdev) == IVPU_HW_40XX)
+ vdev->wa.disable_clock_relinquish = true;
+}
+
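+/*
+ * FPGA and Simics models run much slower than silicon, hence the much longer
+ * boot/JSM/TDR timeouts on those platforms (values presumably in
+ * milliseconds, as elsewhere in the driver).
+ */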
+static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
+{
+ if (ivpu_is_fpga(vdev)) {
+ vdev->timeout.boot = 100000;
+ vdev->timeout.jsm = 50000;
+ vdev->timeout.tdr = 2000000;
+ vdev->timeout.reschedule_suspend = 1000;
+ } else if (ivpu_is_simics(vdev)) {
+ vdev->timeout.boot = 50;
+ vdev->timeout.jsm = 500;
+ vdev->timeout.tdr = 10000;
+ vdev->timeout.reschedule_suspend = 10;
+ } else {
+ vdev->timeout.boot = 1000;
+ vdev->timeout.jsm = 500;
+ vdev->timeout.tdr = 2000;
+ vdev->timeout.reschedule_suspend = 10;
+ }
+}
+
+static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
+{
+ return REGB_POLL_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
+}
+
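+/*
+ * Send a workpoint (WP) request to the power management unit: wait for any
+ * in-flight request to drain, program the three payload registers, set SEND,
+ * then poll SEND clear to confirm the request was consumed (the handshake as
+ * read from the code below).
+ */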
+static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
+ u16 target_ratio, u16 epp, u16 config, u16 cdyn)
+{
+ int ret;
+ u32 val;
+
+ ret = ivpu_pll_wait_for_cmd_send(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
+ return ret;
+ }
+
+ val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0);
+ val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
+ val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
+ REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, val);
+
+ val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1);
+ val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
+ val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, epp, val);
+ REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, val);
+
+ val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2);
+ val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
+ val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CDYN, cdyn, val);
+ REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, val);
+
+ val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_CMD);
+ val = REG_SET_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, val);
+ REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_CMD, val);
+
+ ret = ivpu_pll_wait_for_cmd_send(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
+{
+ return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
+}
+
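+/*
+ * Clamp the requested PLL ratios (ivpu_pll_min_ratio/ivpu_pll_max_ratio are
+ * module parameters defined elsewhere in the driver) to the fused limits,
+ * then fit the default PN ratio inside the resulting [min, max] window.
+ */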
+static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+ u8 fuse_min_ratio, fuse_pn_ratio, fuse_max_ratio;
+ u32 fmin_fuse, fmax_fuse;
+
+ fmin_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMIN_FUSE);
+ fuse_min_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
+ fuse_pn_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
+
+ fmax_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMAX_FUSE);
+ fuse_max_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
+
+ hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
+ hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
+ hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
+}
+
+static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
+{
+ u16 config = enable ? PLL_CONFIG_DEFAULT : 0;
+ u16 cdyn = enable ? PLL_CDYN_DEFAULT : 0;
+ u16 epp = enable ? PLL_EPP_DEFAULT : 0;
+ struct ivpu_hw_info *hw = vdev->hw;
+ u16 target_ratio = hw->pll.pn_ratio;
+ int ret;
+
+ ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, epp: 0x%x, config: 0x%x, cdyn: 0x%x\n",
+ PLL_RATIO_TO_FREQ(target_ratio), epp, config, cdyn);
+
+ ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio,
+ target_ratio, epp, config, cdyn);
+ if (ret) {
+ ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
+ return ret;
+ }
+
+ if (enable) {
+ ret = ivpu_pll_wait_for_status_ready(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ivpu_pll_enable(struct ivpu_device *vdev)
+{
+ return ivpu_pll_drive(vdev, true);
+}
+
+static int ivpu_pll_disable(struct ivpu_device *vdev)
+{
+ return ivpu_pll_drive(vdev, false);
+}
+
+static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN);
+
+ if (enable) {
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
+ } else {
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
+ }
+
+ REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val);
+}
+
+static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN);
+
+ if (enable) {
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
+ } else {
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
+ }
+
+ REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val);
+}
+
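+/*
+ * The NOC QREQN/QACCEPTN/QDENY trio appears to implement an AMBA Q-channel
+ * style handshake; the helpers below only verify that the relevant fields
+ * match the expected request/accept/deny state.
+ */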
+static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static void ivpu_boot_idle_gen_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
+
+ REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val);
+}
+
+static int ivpu_boot_host_ss_check(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_boot_noc_qreqn_check(vdev, 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_noc_qacceptn_check(vdev, 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
+ if (ret)
+ ivpu_err(vdev, "Failed qdeny check %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val);
+
+ ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+ return ret;
+ }
+
+ if (enable) {
+ REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT);
+ REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT);
+ }
+
+ return ret;
+}
+
+static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
+{
+ return ivpu_boot_host_ss_axi_drive(vdev, true);
+}
+
+static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
+ if (enable) {
+ val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
+ val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+ } else {
+ val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
+ val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+ }
+ REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val);
+
+ ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
+ if (ret)
+ ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
+{
+ return ivpu_boot_host_ss_top_noc_drive(vdev, true);
+}
+
+static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
+
+ REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
+
+ if (enable)
+ ndelay(500);
+}
+
+static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
+
+ REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
+
+ if (!enable)
+ ndelay(500);
+}
+
+static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
+{
+ if (ivpu_is_fpga(vdev))
+ return 0;
+
+ return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU,
+ exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
+}
+
+static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
+
+ REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val);
+}
+
+static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
+
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
+
+ REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
+}
+
+static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV);
+
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
+
+ REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val);
+}
+
+static int ivpu_boot_cpu_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_cpu_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
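+/*
+ * Power-up ordering as encoded below: trickle supply first, then the main
+ * power island, wait for the island status, verify the TOP NOC is quiescent,
+ * enable clocks and resets, and only then drop isolation.
+ */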
+static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_boot_pwr_island_trickle_drive(vdev, true);
+ ivpu_boot_pwr_island_drive(vdev, true);
+
+ ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for power island status\n");
+ return ret;
+ }
+
+ ret = ivpu_boot_top_noc_qreqn_check(vdev, 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
+ return ret;
+ }
+
+ ivpu_boot_host_ss_clk_drive(vdev, true);
+ ivpu_boot_host_ss_rst_drive(vdev, true);
+ ivpu_boot_pwr_island_isolation_drive(vdev, false);
+
+ return ret;
+}
+
+static int ivpu_boot_soc_cpu_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN);
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
+ REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val);
+
+ ret = ivpu_boot_cpu_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_cpu_noc_qdeny_check(vdev, 0x0);
+ if (ret)
+ ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_boot_soc_cpu_enable(struct ivpu_device *vdev)
+{
+ return ivpu_boot_soc_cpu_drive(vdev, true);
+}
+
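+/*
+ * After enabling the SOC CPU, hand over the FW entry point: the address is
+ * shifted into the IMAGE_LOCATION field (ffs(MASK) - 1 yields the field's
+ * bit offset) and DONE is then set to latch the value.
+ */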
+static int ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
+{
+ int ret;
+ u32 val;
+ u64 val64;
+
+ ret = ivpu_boot_soc_cpu_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret);
+ return ret;
+ }
+
+ val64 = vdev->fw->entry_point;
+ val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1;
+ REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64);
+
+ val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val);
+ REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val);
+
+ ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
+ ivpu_fw_is_cold_boot(vdev) ? "cold boot" : "resume");
+
+ return 0;
+}
+
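+/*
+ * D0i3 transitions are serialized on the INPROGRESS bit: poll it clear
+ * before toggling I3 and again afterwards to confirm the transition
+ * completed.
+ */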
+static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
+ return ret;
+ }
+
+ val = REGB_RD32(VPU_40XX_BUTTRESS_D0I3_CONTROL);
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val);
+ REGB_WR32(VPU_40XX_BUTTRESS_D0I3_CONTROL, val);
+
+ ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
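+/*
+ * Worked examples: 0x00 (all tiles enabled) and 0x08 (only tile 3 disabled)
+ * are accepted; 0x06 (two tiles) and 0x40 (bit out of range) are rejected.
+ * (config & (config - 1)) == 0 is the usual single-bit/power-of-two test.
+ */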
+static bool ivpu_tile_disable_check(u32 config)
+{
+ /* Allowed values: 0 or a single set bit in the range 0-5 (6 tiles) */
+ if (config == 0)
+ return true;
+
+ if (config > BIT(TILE_MAX_NUM - 1))
+ return false;
+
+ if ((config & (config - 1)) == 0)
+ return true;
+
+ return false;
+}
+
+static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+ u32 tile_disable;
+ u32 tile_enable;
+ u32 fuse;
+
+ fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
+ if (!REG_TEST_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, VALID, fuse)) {
+ ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse);
+ return -EIO;
+ }
+
+ tile_disable = REG_GET_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, CONFIG, fuse);
+ if (!ivpu_tile_disable_check(tile_disable)) {
+ ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", tile_disable);
+ return -EIO;
+ }
+
+ if (tile_disable)
+ ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n",
+ TILE_MAX_NUM - 1, ffs(tile_disable) - 1);
+ else
+ ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);
+
+ tile_enable = (~tile_disable) & TILE_MAX_MASK;
+
+ hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku);
+ hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku);
+ hw->tile_fuse = tile_disable;
+ hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
+
+ ivpu_pll_init_frequency_ratios(vdev);
+
+ ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M);
+ ivpu_hw_init_range(&hw->ranges.user, 0x80000000, SZ_256M);
+ ivpu_hw_init_range(&hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
+ ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G);
+
+ return 0;
+}
+
+static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+{
+ int ret;
+ u32 val;
+
+ ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Wait for *_TRIGGER timed out\n");
+ return ret;
+ }
+
+ val = REGB_RD32(VPU_40XX_BUTTRESS_IP_RESET);
+ val = REG_SET_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, val);
+ REGB_WR32(VPU_40XX_BUTTRESS_IP_RESET, val);
+
+ ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+
+ return ret;
+}
+
+static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ ret = ivpu_boot_d0i3_drive(vdev, true);
+ if (ret)
+ ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
+
+ udelay(5); /* VPU requires 5 us to complete the transition */
+
+ return ret;
+}
+
+static int ivpu_hw_40xx_d0i3_disable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ ret = ivpu_boot_d0i3_drive(vdev, false);
+ if (ret)
+ ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);
+
+ return ret;
+}
+
+static void ivpu_hw_40xx_profiling_freq_reg_set(struct ivpu_device *vdev)
+{
+ u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
+
+ if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT)
+ val = REG_CLR_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val);
+ else
+ val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val);
+
+ REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val);
+}
+
+static void ivpu_hw_40xx_ats_print(struct ivpu_device *vdev)
+{
+ ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n",
+ REGB_RD32(VPU_40XX_BUTTRESS_HM_ATS) ? "Enable" : "Disable");
+}
+
+static void ivpu_hw_40xx_clock_relinquish_disable(struct ivpu_device *vdev)
+{
+ u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
+
+ val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, DISABLE_CLK_RELINQUISH, val);
+ REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val);
+}
+
+static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_hw_40xx_reset(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to reset HW: %d\n", ret);
+ return ret;
+ }
+
+ ivpu_hw_read_platform(vdev);
+ ivpu_hw_wa_init(vdev);
+ ivpu_hw_timeouts_init(vdev);
+
+ ret = ivpu_hw_40xx_d0i3_disable(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
+
+ ret = ivpu_pll_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
+ return ret;
+ }
+
+ if (IVPU_WA(disable_clock_relinquish))
+ ivpu_hw_40xx_clock_relinquish_disable(vdev);
+ ivpu_hw_40xx_profiling_freq_reg_set(vdev);
+ ivpu_hw_40xx_ats_print(vdev);
+
+ ret = ivpu_boot_host_ss_check(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
+ return ret;
+ }
+
+ ivpu_boot_idle_gen_drive(vdev, false);
+
+ ret = ivpu_boot_pwr_domain_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_host_ss_axi_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_host_ss_top_noc_enable(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_hw_40xx_boot_fw(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_boot_no_snoop_enable(vdev);
+ ivpu_boot_tbu_mmu_enable(vdev);
+
+ ret = ivpu_boot_soc_cpu_boot(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);
+
+ return ret;
+}
+
+static bool ivpu_hw_40xx_is_idle(struct ivpu_device *vdev)
+{
+ u32 val;
+
+ if (IVPU_WA(punit_disabled))
+ return true;
+
+ val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
+ return REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, val) &&
+ REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, val);
+}
+
+static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
+{
+ int ret = 0;
+
+ if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev))
+ ivpu_warn(vdev, "Failed to reset the VPU\n");
+
+ if (ivpu_pll_disable(vdev)) {
+ ivpu_err(vdev, "Failed to disable PLL\n");
+ ret = -EIO;
+ }
+
+ if (ivpu_hw_40xx_d0i3_enable(vdev)) {
+ ivpu_err(vdev, "Failed to enter D0I3\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
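+/*
+ * Watchdog registers appear to be write-protected; the writes below are
+ * preceded by the TIM_SAFE_ENABLE magic to unlock them.
+ */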
+static void ivpu_hw_40xx_wdt_disable(struct ivpu_device *vdev)
+{
+ u32 val;
+
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
+
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0);
+
+ val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG);
+ val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
+}
+
+/* Register indirect accesses */
+static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
+{
+ u32 pll_curr_ratio;
+
+ pll_curr_ratio = REGB_RD32(VPU_40XX_BUTTRESS_PLL_FREQ);
+ pll_curr_ratio &= VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK;
+
+ return PLL_RATIO_TO_FREQ(pll_curr_ratio);
+}
+
+static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
+{
+ return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
+}
+
+static u32 ivpu_hw_40xx_reg_telemetry_size_get(struct ivpu_device *vdev)
+{
+ return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE);
+}
+
+static u32 ivpu_hw_40xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
+{
+ return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
+}
+
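+/*
+ * Doorbell registers sit at a fixed stride; REGV_WR32I (presumably an
+ * indexed register write) rings doorbell db_id at DOORBELL_0 + db_id *
+ * stride.
+ */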
+static void ivpu_hw_40xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
+{
+ u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0;
+ u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET);
+
+ REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
+}
+
+static u32 ivpu_hw_40xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
+{
+ return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM);
+}
+
+static u32 ivpu_hw_40xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
+{
+ u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
+
+ return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
+}
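
REG_GET_FLD() is assumed to expand to a FIELD_GET() on the <REG>_<FIELD>_MASK macro, so the FIFO count is simply the FILL_LEVEL bit-field (bits 23:16 per the 40xx register header below). A self-contained sketch of that extraction:

	#include <stdint.h>
	#include <stdio.h>

	#define GENMASK(h, l)        ((~0u << (l)) & (~0u >> (31 - (h))))
	#define FIELD_GET(mask, val) (((val) & (mask)) >> __builtin_ctz(mask))

	#define FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)

	int main(void)
	{
		uint32_t stat = 0x00030000; /* as if REGV_RD32_SILENT() returned this */

		printf("fill level: %u\n", FIELD_GET(FIFO_STAT_FILL_LEVEL_MASK, stat));
		return 0; /* prints 3 */
	}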
+
+static void ivpu_hw_40xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
+{
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
+}
+
+static void ivpu_hw_40xx_irq_clear(struct ivpu_device *vdev)
+{
+ REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
+}
+
+static void ivpu_hw_40xx_irq_enable(struct ivpu_device *vdev)
+{
+ REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
+ REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
+ REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
+ REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+}
+
+static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev)
+{
+ REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+ REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
+ REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
+ REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul);
+}
+
+static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
+{
+ /* TODO: For LNN hang consider engine reset instead of full recovery */
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
+{
+ ivpu_hw_wdt_disable(vdev);
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
+{
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+/* Handler for IRQs from VPU core (irqV) */
+static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq)
+{
+ u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (!status)
+ return IRQ_NONE;
+
+ REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
+ ivpu_mmu_irq_evtq_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
+ ret |= ivpu_ipc_irq_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
+ ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
+ ivpu_mmu_irq_gerr_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
+ ivpu_hw_40xx_irq_wdt_mss_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
+ ivpu_hw_40xx_irq_wdt_nce_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
+ ivpu_hw_40xx_irq_noc_firewall_handler(vdev);
+
+ return ret;
+}
+
+/* Handler for IRQs from Buttress core (irqB) */
+static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
+{
+ bool schedule_recovery = false;
+ u32 status = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+
+ if (status == 0)
+ return IRQ_NONE;
+
+ REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
+ ivpu_dbg(vdev, IRQ, "FREQ_CHANGE");
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
+ ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
+ REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1),
+ REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2));
+ REGB_WR32(VPU_40XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, status)) {
+ ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG));
+ REGB_WR32(VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, status)) {
+ ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG));
+ REGB_WR32(VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, status)) {
+ ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x",
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW),
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH));
+ REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, status)) {
+ ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x",
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW),
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH));
+ REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, status)) {
+ ivpu_err(vdev, "Survivability error detected\n");
+ schedule_recovery = true;
+ }
+
+ if (schedule_recovery)
+ ivpu_pm_schedule_recovery(vdev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr)
+{
+ struct ivpu_device *vdev = ptr;
+ irqreturn_t ret = IRQ_NONE;
+
+ ret |= ivpu_hw_40xx_irqv_handler(vdev, irq);
+ ret |= ivpu_hw_40xx_irqb_handler(vdev, irq);
+
+ if (ret & IRQ_WAKE_THREAD)
+ return IRQ_WAKE_THREAD;
+
+ return ret;
+}
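
The combined handler above is what ends up attached to the PCI interrupt; the actual hook-up lives in ivpu_drv.c and is not part of this hunk. A hypothetical registration sketch, assuming vdev->hw->ops points at the ops table defined at the end of this file:

	static int ivpu_irq_init_sketch(struct ivpu_device *vdev, struct pci_dev *pdev)
	{
		/* devm_request_irq() hands vdev back as the void *ptr argument above */
		return devm_request_irq(vdev->drm.dev, pdev->irq,
					vdev->hw->ops->irq_handler, 0,
					"intel_vpu", vdev);
	}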
+
+static void ivpu_hw_40xx_diagnose_failure(struct ivpu_device *vdev)
+{
+ u32 irqv = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+ u32 irqb = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+
+ if (ivpu_hw_40xx_reg_ipc_rx_count_get(vdev))
+ ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
+ ivpu_err(vdev, "WDT MSS timeout detected\n");
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
+ ivpu_err(vdev, "WDT NCE timeout detected\n");
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
+ ivpu_err(vdev, "NOC Firewall irq detected\n");
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb)) {
+ ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
+ REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1),
+ REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2));
+ }
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, irqb))
+ ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG));
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, irqb))
+ ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG));
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, irqb))
+ ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n",
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW),
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH));
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, irqb))
+ ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n",
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW),
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH));
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, irqb))
+ ivpu_err(vdev, "Survivability error detected\n");
+}
+
+const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
+ .info_init = ivpu_hw_40xx_info_init,
+ .power_up = ivpu_hw_40xx_power_up,
+ .is_idle = ivpu_hw_40xx_is_idle,
+ .power_down = ivpu_hw_40xx_power_down,
+ .boot_fw = ivpu_hw_40xx_boot_fw,
+ .wdt_disable = ivpu_hw_40xx_wdt_disable,
+ .diagnose_failure = ivpu_hw_40xx_diagnose_failure,
+ .reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
+ .reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
+ .reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
+ .reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
+ .reg_db_set = ivpu_hw_40xx_reg_db_set,
+ .reg_ipc_rx_addr_get = ivpu_hw_40xx_reg_ipc_rx_addr_get,
+ .reg_ipc_rx_count_get = ivpu_hw_40xx_reg_ipc_rx_count_get,
+ .reg_ipc_tx_set = ivpu_hw_40xx_reg_ipc_tx_set,
+ .irq_clear = ivpu_hw_40xx_irq_clear,
+ .irq_enable = ivpu_hw_40xx_irq_enable,
+ .irq_disable = ivpu_hw_40xx_irq_disable,
+ .irq_handler = ivpu_hw_40xx_irq_handler,
+};
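
Callers never reach these functions directly; the generic wrappers in ivpu_hw.h dispatch through the table, along the lines of this representative sketch:

	static inline int ivpu_hw_power_up(struct ivpu_device *vdev)
	{
		return vdev->hw->ops->power_up(vdev); /* 37xx or 40xx, picked at probe */
	}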
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
new file mode 100644
index 000000000000..5139cfe88532
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_40XX_REG_H__
+#define __IVPU_HW_40XX_REG_H__
+
+#include <linux/bits.h>
+
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT 0x00000000u
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI0_ERR_MASK BIT_MASK(2)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI1_ERR_MASK BIT_MASK(3)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR0_ERR_MASK BIT_MASK(4)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR1_ERR_MASK BIT_MASK(5)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_SURV_ERR_MASK BIT_MASK(6)
+
+#define VPU_40XX_BUTTRESS_LOCAL_INT_MASK 0x00000004u
+#define VPU_40XX_BUTTRESS_GLOBAL_INT_MASK 0x00000008u
+
+#define VPU_40XX_BUTTRESS_HM_ATS 0x0000000cu
+
+#define VPU_40XX_BUTTRESS_ATS_ERR_LOG1 0x00000010u
+#define VPU_40XX_BUTTRESS_ATS_ERR_LOG2 0x00000014u
+#define VPU_40XX_BUTTRESS_ATS_ERR_CLEAR 0x00000018u
+
+#define VPU_40XX_BUTTRESS_CFI0_ERR_LOG 0x0000001cu
+#define VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR 0x00000020u
+
+#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS 0x00000024u
+
+#define VPU_40XX_BUTTRESS_CFI1_ERR_LOG 0x00000040u
+#define VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR 0x00000044u
+
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW 0x00000048u
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH 0x0000004cu
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR 0x00000050u
+
+#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS 0x00000054u
+
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW 0x00000058u
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH 0x0000005cu
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR 0x00000060u
+
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000130u
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
+
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1 0x00000134u
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
+
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000138u
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CDYN_MASK GENMASK(31, 16)
+
+#define VPU_40XX_BUTTRESS_WP_REQ_CMD 0x0000013cu
+#define VPU_40XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
+
+#define VPU_40XX_BUTTRESS_PLL_FREQ 0x00000148u
+#define VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK GENMASK(15, 0)
+
+#define VPU_40XX_BUTTRESS_TILE_FUSE 0x00000150u
+#define VPU_40XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
+#define VPU_40XX_BUTTRESS_TILE_FUSE_CONFIG_MASK GENMASK(6, 1)
+
+#define VPU_40XX_BUTTRESS_VPU_STATUS 0x00000154u
+#define VPU_40XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
+
+#define VPU_40XX_BUTTRESS_IP_RESET 0x00000160u
+#define VPU_40XX_BUTTRESS_IP_RESET_TRIGGER_MASK BIT_MASK(0)
+
+#define VPU_40XX_BUTTRESS_D0I3_CONTROL 0x00000164u
+#define VPU_40XX_BUTTRESS_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
+#define VPU_40XX_BUTTRESS_D0I3_CONTROL_I3_MASK BIT_MASK(2)
+
+#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000168u
+#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x0000016cu
+#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000170u
+
+#define VPU_40XX_BUTTRESS_FMIN_FUSE 0x00000174u
+#define VPU_40XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
+#define VPU_40XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
+
+#define VPU_40XX_BUTTRESS_FMAX_FUSE 0x00000178u
+#define VPU_40XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
+
+#define VPU_40XX_HOST_SS_CPR_CLK_EN 0x00000080u
+#define VPU_40XX_HOST_SS_CPR_CLK_EN_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_CLK_EN_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_CLK_EN_CSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_CPR_CLK_SET 0x00000084u
+#define VPU_40XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_CPR_RST_EN 0x00000090u
+#define VPU_40XX_HOST_SS_CPR_RST_EN_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_RST_EN_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_RST_EN_CSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_CPR_RST_SET 0x00000094u
+#define VPU_40XX_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_CPR_RST_CLR 0x00000098u
+#define VPU_40XX_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_HW_VERSION 0x00000108u
+#define VPU_40XX_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0)
+#define VPU_40XX_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8)
+#define VPU_40XX_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16)
+
+#define VPU_40XX_HOST_SS_SW_VERSION 0x0000010cu
+
+#define VPU_40XX_HOST_SS_GEN_CTRL 0x00000118u
+#define VPU_40XX_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29)
+
+#define VPU_40XX_HOST_SS_NOC_QREQN 0x00000154u
+#define VPU_40XX_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define VPU_40XX_HOST_SS_NOC_QACCEPTN 0x00000158u
+#define VPU_40XX_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define VPU_40XX_HOST_SS_NOC_QDENY 0x0000015cu
+#define VPU_40XX_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define VPU_40XX_TOP_NOC_QREQN 0x00000160u
+#define VPU_40XX_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
+#define VPU_40XX_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(2)
+
+#define VPU_40XX_TOP_NOC_QACCEPTN 0x00000164u
+#define VPU_40XX_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
+#define VPU_40XX_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(2)
+
+#define VPU_40XX_TOP_NOC_QDENY 0x00000168u
+#define VPU_40XX_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
+#define VPU_40XX_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(2)
+
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN 0x00000170u
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6)
+
+#define VPU_40XX_HOST_SS_ICB_STATUS_0 0x00010210u
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31)
+
+#define VPU_40XX_HOST_SS_ICB_STATUS_1 0x00010214u
+#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0)
+#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2)
+
+#define VPU_40XX_HOST_SS_ICB_CLEAR_0 0x00010220u
+#define VPU_40XX_HOST_SS_ICB_CLEAR_1 0x00010224u
+#define VPU_40XX_HOST_SS_ICB_ENABLE_0 0x00010240u
+#define VPU_40XX_HOST_SS_ICB_ENABLE_1 0x00010244u
+
+#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u
+
+#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu
+#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)
+
+#define VPU_40XX_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
+#define VPU_40XX_HOST_SS_AON_PWR_ISO_EN0_CSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0_CSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_CSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0_CSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_40XX_HOST_SS_AON_IDLE_GEN 0x00030200u
+#define VPU_40XX_HOST_SS_AON_IDLE_GEN_EN_MASK BIT_MASK(0)
+#define VPU_40XX_HOST_SS_AON_IDLE_GEN_HW_PG_EN_MASK BIT_MASK(1)
+
+#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE 0x00030204u
+#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
+
+#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO 0x00040040u
+#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_DONE_MASK BIT_MASK(0)
+#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
+#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3)
+
+#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
+#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
+#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
+
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_SNOOP_OVERRIDE_EN_MASK BIT_MASK(3)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AW_SNOOP_OVERRIDE_MASK BIT_MASK(4)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AR_SNOOP_OVERRIDE_MASK BIT_MASK(5)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11)
+
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV 0x00360004u
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
+
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
+
+#define VPU_40XX_CPU_SS_TIM_WATCHDOG 0x0102009cu
+#define VPU_40XX_CPU_SS_TIM_WDOG_EN 0x010200a4u
+#define VPU_40XX_CPU_SS_TIM_SAFE 0x010200a8u
+
+#define VPU_40XX_CPU_SS_TIM_GEN_CONFIG 0x01021008u
+#define VPU_40XX_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
+
+#define VPU_40XX_CPU_SS_CPR_NOC_QREQN 0x01010030u
+#define VPU_40XX_CPU_SS_CPR_NOC_QREQN_TOP_MMIO_MASK BIT_MASK(0)
+
+#define VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN 0x01010034u
+#define VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN_TOP_MMIO_MASK BIT_MASK(0)
+
+#define VPU_40XX_CPU_SS_CPR_NOC_QDENY 0x01010038u
+#define VPU_40XX_CPU_SS_CPR_NOC_QDENY_TOP_MMIO_MASK BIT_MASK(0)
+
+#define VPU_40XX_CPU_SS_TIM_IPC_FIFO 0x010200f0u
+#define VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT 0x01029008u
+
+#define VPU_40XX_CPU_SS_DOORBELL_0 0x01300000u
+#define VPU_40XX_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
+
+#define VPU_40XX_CPU_SS_DOORBELL_1 0x01301000u
+
+#endif /* __IVPU_HW_40XX_REG_H__ */
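
A quick self-contained check of the VPU_STATUS layout defined above (READY at bit 0, IDLE at bit 1), mirroring the test in ivpu_hw_40xx_is_idle(); BIT_MASK() is re-derived so the sketch builds outside the kernel tree:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BIT_MASK(n)  (1u << (n))
	#define STATUS_READY BIT_MASK(0)
	#define STATUS_IDLE  BIT_MASK(1)

	static bool vpu_is_idle(uint32_t status)
	{
		return (status & STATUS_READY) && (status & STATUS_IDLE);
	}

	int main(void)
	{
		printf("%d\n", vpu_is_idle(0x3)); /* READY | IDLE -> prints 1 */
		return 0;
	}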
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
deleted file mode 100644
index 593b8ff07417..000000000000
--- a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
+++ /dev/null
@@ -1,281 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2020-2023 Intel Corporation
- */
-
-#ifndef __IVPU_HW_MTL_REG_H__
-#define __IVPU_HW_MTL_REG_H__
-
-#include <linux/bits.h>
-
-#define MTL_BUTTRESS_INTERRUPT_TYPE 0x00000000u
-
-#define MTL_BUTTRESS_INTERRUPT_STAT 0x00000004u
-#define MTL_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
-#define MTL_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
-#define MTL_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
-
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
-
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
-
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
-
-#define MTL_BUTTRESS_WP_REQ_CMD 0x00000014u
-#define MTL_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
-
-#define MTL_BUTTRESS_WP_DOWNLOAD 0x00000018u
-#define MTL_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
-
-#define MTL_BUTTRESS_CURRENT_PLL 0x0000001cu
-#define MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
-
-#define MTL_BUTTRESS_PLL_ENABLE 0x00000020u
-
-#define MTL_BUTTRESS_FMIN_FUSE 0x00000024u
-#define MTL_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
-#define MTL_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
-
-#define MTL_BUTTRESS_FMAX_FUSE 0x00000028u
-#define MTL_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
-
-#define MTL_BUTTRESS_TILE_FUSE 0x0000002cu
-#define MTL_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
-#define MTL_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
-
-#define MTL_BUTTRESS_LOCAL_INT_MASK 0x00000030u
-#define MTL_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
-
-#define MTL_BUTTRESS_PLL_STATUS 0x00000040u
-#define MTL_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1)
-
-#define MTL_BUTTRESS_VPU_STATUS 0x00000044u
-#define MTL_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
-#define MTL_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
-
-#define MTL_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
-#define MTL_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
-#define MTL_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
-
-#define MTL_BUTTRESS_VPU_IP_RESET 0x00000050u
-#define MTL_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
-
-#define MTL_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u
-#define MTL_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
-#define MTL_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u
-
-#define MTL_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u
-#define MTL_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u
-#define MTL_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u
-
-#define MTL_BUTTRESS_UFI_ERR_LOG 0x000000b0u
-#define MTL_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
-#define MTL_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
-#define MTL_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
-
-#define MTL_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u
-
-#define MTL_VPU_HOST_SS_CPR_CLK_SET 0x00000084u
-#define MTL_VPU_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
-#define MTL_VPU_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11)
-
-#define MTL_VPU_HOST_SS_CPR_RST_SET 0x00000094u
-#define MTL_VPU_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10)
-#define MTL_VPU_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
-
-#define MTL_VPU_HOST_SS_CPR_RST_CLR 0x00000098u
-#define MTL_VPU_HOST_SS_CPR_RST_CLR_AON_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
-#define MTL_VPU_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
-
-#define MTL_VPU_HOST_SS_HW_VERSION 0x00000108u
-#define MTL_VPU_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0)
-#define MTL_VPU_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8)
-#define MTL_VPU_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16)
-
-#define MTL_VPU_HOST_SS_GEN_CTRL 0x00000118u
-#define MTL_VPU_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29)
-
-#define MTL_VPU_HOST_SS_NOC_QREQN 0x00000154u
-#define MTL_VPU_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0)
-
-#define MTL_VPU_HOST_SS_NOC_QACCEPTN 0x00000158u
-#define MTL_VPU_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0)
-
-#define MTL_VPU_HOST_SS_NOC_QDENY 0x0000015cu
-#define MTL_VPU_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0)
-
-#define MTL_VPU_TOP_NOC_QREQN 0x00000160u
-#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
-#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
-
-#define MTL_VPU_TOP_NOC_QACCEPTN 0x00000164u
-#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
-#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
-
-#define MTL_VPU_TOP_NOC_QDENY 0x00000168u
-#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
-#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(1)
-
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN 0x00000170u
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6)
-
-#define MTL_VPU_HOST_SS_ICB_STATUS_0 0x00010210u
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31)
-
-#define MTL_VPU_HOST_SS_ICB_STATUS_1 0x00010214u
-#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2)
-
-#define MTL_VPU_HOST_SS_ICB_CLEAR_0 0x00010220u
-#define MTL_VPU_HOST_SS_ICB_CLEAR_1 0x00010224u
-#define MTL_VPU_HOST_SS_ICB_ENABLE_0 0x00010240u
-
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u
-
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK GENMASK(7, 0)
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK GENMASK(15, 8)
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK GENMASK(31, 24)
-
-#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
-#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK BIT_MASK(3)
-
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK BIT_MASK(3)
-
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK BIT_MASK(3)
-
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK BIT_MASK(3)
-
-#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN 0x00030200u
-#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK BIT_MASK(0)
-
-#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE 0x00030204u
-#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
-
-#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO 0x00041040u
-#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
-#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3)
-
-#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
-#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
-#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
-
-#define MTL_VPU_HOST_MMU_IDR0 0x00200000u
-#define MTL_VPU_HOST_MMU_IDR1 0x00200004u
-#define MTL_VPU_HOST_MMU_IDR3 0x0020000cu
-#define MTL_VPU_HOST_MMU_IDR5 0x00200014u
-#define MTL_VPU_HOST_MMU_CR0 0x00200020u
-#define MTL_VPU_HOST_MMU_CR0ACK 0x00200024u
-#define MTL_VPU_HOST_MMU_CR1 0x00200028u
-#define MTL_VPU_HOST_MMU_CR2 0x0020002cu
-#define MTL_VPU_HOST_MMU_IRQ_CTRL 0x00200050u
-#define MTL_VPU_HOST_MMU_IRQ_CTRLACK 0x00200054u
-
-#define MTL_VPU_HOST_MMU_GERROR 0x00200060u
-#define MTL_VPU_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
-#define MTL_VPU_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
-#define MTL_VPU_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
-#define MTL_VPU_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
-#define MTL_VPU_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
-#define MTL_VPU_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7)
-
-#define MTL_VPU_HOST_MMU_GERRORN 0x00200064u
-
-#define MTL_VPU_HOST_MMU_STRTAB_BASE 0x00200080u
-#define MTL_VPU_HOST_MMU_STRTAB_BASE_CFG 0x00200088u
-#define MTL_VPU_HOST_MMU_CMDQ_BASE 0x00200090u
-#define MTL_VPU_HOST_MMU_CMDQ_PROD 0x00200098u
-#define MTL_VPU_HOST_MMU_CMDQ_CONS 0x0020009cu
-#define MTL_VPU_HOST_MMU_EVTQ_BASE 0x002000a0u
-#define MTL_VPU_HOST_MMU_EVTQ_PROD 0x002000a8u
-#define MTL_VPU_HOST_MMU_EVTQ_CONS 0x002000acu
-#define MTL_VPU_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
-#define MTL_VPU_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
-
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK BIT_MASK(3)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK BIT_MASK(4)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK BIT_MASK(5)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11)
-
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV 0x00360004u
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
-
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
-
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET 0x06010004u
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK BIT_MASK(1)
-
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR 0x06010018u
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK BIT_MASK(1)
-
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC 0x06010040u
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK BIT_MASK(0)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK BIT_MASK(1)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK BIT_MASK(2)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK BIT_MASK(3)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK GENMASK(31, 4)
-
-#define MTL_VPU_CPU_SS_TIM_WATCHDOG 0x0602009cu
-#define MTL_VPU_CPU_SS_TIM_WDOG_EN 0x060200a4u
-#define MTL_VPU_CPU_SS_TIM_SAFE 0x060200a8u
-#define MTL_VPU_CPU_SS_TIM_IPC_FIFO 0x060200f0u
-
-#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG 0x06021008u
-#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
-
-#define MTL_VPU_CPU_SS_DOORBELL_0 0x06300000u
-#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
-
-#define MTL_VPU_CPU_SS_DOORBELL_1 0x06301000u
-
-#endif /* __IVPU_HW_MTL_REG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index d45be0615b47..de9e69f70af7 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -289,15 +289,13 @@ ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_job *job;
- size_t buf_size;
int ret;
ret = ivpu_rpm_get(vdev);
if (ret < 0)
return NULL;
- buf_size = sizeof(*job) + bo_count * sizeof(struct ivpu_bo *);
- job = kzalloc(buf_size, GFP_KERNEL);
+ job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
if (!job)
goto err_rpm_put;
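
The struct_size() helper (linux/overflow.h) computes the size of a struct with a trailing flexible array and, unlike the open-coded multiply it replaces here, saturates to SIZE_MAX on overflow so the allocation fails instead of coming back undersized. A userspace sketch of the equivalent arithmetic (field names are illustrative, not the real struct ivpu_job layout):

	#include <stdlib.h>

	struct job_sketch {
		unsigned int engine_idx;
		size_t bo_count;
		void *bos[];        /* flexible array member, one slot per BO */
	};

	int main(void)
	{
		size_t n = 4;
		/* struct_size(job, bos, n) == sizeof(*job) + n * sizeof(job->bos[0]),
		 * plus overflow saturation in the kernel version */
		struct job_sketch *job = calloc(1, sizeof(struct job_sketch) +
						   n * sizeof(void *));

		free(job);
		return 0;
	}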
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index b8b259b3aa63..baefaf7bb3cb 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -7,7 +7,7 @@
#include <linux/highmem.h>
#include "ivpu_drv.h"
-#include "ivpu_hw_mtl_reg.h"
+#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
@@ -143,6 +143,16 @@
#define IVPU_MMU_CD_0_ASET BIT(47)
#define IVPU_MMU_CD_0_ASID GENMASK_ULL(63, 48)
+#define IVPU_MMU_T0SZ_48BIT 16
+#define IVPU_MMU_T0SZ_38BIT 26
+
+#define IVPU_MMU_IPS_48BIT 5
+#define IVPU_MMU_IPS_44BIT 4
+#define IVPU_MMU_IPS_42BIT 3
+#define IVPU_MMU_IPS_40BIT 2
+#define IVPU_MMU_IPS_36BIT 1
+#define IVPU_MMU_IPS_32BIT 0
+
#define IVPU_MMU_CD_1_TTB0_MASK GENMASK_ULL(51, 4)
#define IVPU_MMU_STE_0_S1CDMAX GENMASK_ULL(63, 59)
@@ -176,13 +186,13 @@
#define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC)
#define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC)
-#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ)) | \
- (REG_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT)) | \
- (REG_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT)) | \
- (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
- (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
- (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
- (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT)))
+#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ)) | \
+ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT)) | \
+ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT)) | \
+ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
+ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
+ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
+ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT)))
static char *ivpu_mmu_event_to_str(u32 cmd)
{
@@ -240,15 +250,15 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
else
val_ref = IVPU_MMU_IDR0_REF;
- val = REGV_RD32(MTL_VPU_HOST_MMU_IDR0);
+ val = REGV_RD32(VPU_37XX_HOST_MMU_IDR0);
if (val != val_ref)
ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
- val = REGV_RD32(MTL_VPU_HOST_MMU_IDR1);
+ val = REGV_RD32(VPU_37XX_HOST_MMU_IDR1);
if (val != IVPU_MMU_IDR1_REF)
ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
- val = REGV_RD32(MTL_VPU_HOST_MMU_IDR3);
+ val = REGV_RD32(VPU_37XX_HOST_MMU_IDR3);
if (val != IVPU_MMU_IDR3_REF)
ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
@@ -259,7 +269,7 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
else
val_ref = IVPU_MMU_IDR5_REF;
- val = REGV_RD32(MTL_VPU_HOST_MMU_IDR5);
+ val = REGV_RD32(VPU_37XX_HOST_MMU_IDR5);
if (val != val_ref)
ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
}
@@ -386,18 +396,18 @@ static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
int ret;
- ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, 0);
+ ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, 0);
if (ret)
return ret;
- return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, irq_ctrl);
+ return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, irq_ctrl);
}
static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
{
struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
- return REGV_POLL(MTL_VPU_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
+ return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
IVPU_MMU_QUEUE_TIMEOUT_US);
}
@@ -437,7 +447,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
return ret;
clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
- REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, q->prod);
+ REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, q->prod);
ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
if (ret)
@@ -485,7 +495,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
mmu->evtq.prod = 0;
mmu->evtq.cons = 0;
- ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, 0);
+ ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, 0);
if (ret)
return ret;
@@ -495,17 +505,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
- REGV_WR32(MTL_VPU_HOST_MMU_CR1, val);
+ REGV_WR32(VPU_37XX_HOST_MMU_CR1, val);
- REGV_WR64(MTL_VPU_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
- REGV_WR32(MTL_VPU_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
+ REGV_WR64(VPU_37XX_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
+ REGV_WR32(VPU_37XX_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
- REGV_WR64(MTL_VPU_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
- REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, 0);
- REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_CONS, 0);
+ REGV_WR64(VPU_37XX_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
+ REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, 0);
+ REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_CONS, 0);
val = IVPU_MMU_CR0_CMDQEN;
- ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+ ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
@@ -521,17 +531,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
if (ret)
return ret;
- REGV_WR64(MTL_VPU_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
- REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC, 0);
- REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, 0);
+ REGV_WR64(VPU_37XX_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
+ REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC, 0);
+ REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, 0);
val |= IVPU_MMU_CR0_EVTQEN;
- ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+ ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
val |= IVPU_MMU_CR0_ATSCHK;
- ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+ ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
@@ -540,7 +550,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
return ret;
val |= IVPU_MMU_CR0_SMMUEN;
- return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+ return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
}
static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
@@ -617,12 +627,12 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
if (cd_dma != 0) {
- cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, 26) |
+ cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, 3) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
IVPU_MMU_CD_0_TCR_EPD1 |
IVPU_MMU_CD_0_AA64 |
@@ -791,14 +801,14 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
- evtq->prod = REGV_RD32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC);
+ evtq->prod = REGV_RD32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC);
if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
return NULL;
clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
- REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
+ REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
return evt;
}
@@ -831,35 +841,35 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
ivpu_dbg(vdev, IRQ, "MMU error\n");
- gerror_val = REGV_RD32(MTL_VPU_HOST_MMU_GERROR);
- gerrorn_val = REGV_RD32(MTL_VPU_HOST_MMU_GERRORN);
+ gerror_val = REGV_RD32(VPU_37XX_HOST_MMU_GERROR);
+ gerrorn_val = REGV_RD32(VPU_37XX_HOST_MMU_GERRORN);
active = gerror_val ^ gerrorn_val;
if (!(active & IVPU_MMU_GERROR_ERR_MASK))
return;
- if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT, active))
+ if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
+ if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
+ if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
+ if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT, active))
+ if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT, active))
ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT, active))
+ if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT, active))
ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ, active))
+ if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ, active))
ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
- REGV_WR32(MTL_VPU_HOST_MMU_GERRORN, gerror_val);
+ REGV_WR32(VPU_37XX_HOST_MMU_GERRORN, gerror_val);
}
int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 8ce9b12ac356..1d2e554e2c4a 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -11,10 +11,12 @@
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
-#define IVPU_MMU_PGD_INDEX_MASK GENMASK(38, 30)
+#define IVPU_MMU_PGD_INDEX_MASK GENMASK(47, 39)
+#define IVPU_MMU_PUD_INDEX_MASK GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK GENMASK(20, 12)
-#define IVPU_MMU_ENTRY_FLAGS_MASK GENMASK(11, 0)
+#define IVPU_MMU_ENTRY_FLAGS_MASK (BIT(52) | GENMASK(11, 0))
+#define IVPU_MMU_ENTRY_FLAG_CONT BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF BIT(10)
#define IVPU_MMU_ENTRY_FLAG_USER BIT(6)
@@ -22,10 +24,13 @@
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID BIT(0)
-#define IVPU_MMU_PAGE_SIZE SZ_4K
-#define IVPU_MMU_PTE_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
-#define IVPU_MMU_PMD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
-#define IVPU_MMU_PGTABLE_SIZE (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))
+#define IVPU_MMU_PAGE_SIZE SZ_4K
+#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
+#define IVPU_MMU_PTE_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
+#define IVPU_MMU_PMD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
+#define IVPU_MMU_PUD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
+#define IVPU_MMU_PGD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
+#define IVPU_MMU_PGTABLE_SIZE (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))
#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
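
Composing a leaf PTE from the flag masks above, as a standalone sketch; which prot flags the driver actually sets per mapping is decided in ivpu_mmu_context_map_sgt() and is not shown in this hunk:

	#include <stdint.h>

	#define ENTRY_FLAG_CONT      (1ull << 52) /* one of 16 contiguous 4K pages */
	#define ENTRY_FLAG_NG        (1ull << 11)
	#define ENTRY_FLAG_AF        (1ull << 10)
	#define ENTRY_FLAG_TYPE_PAGE (1ull << 1)
	#define ENTRY_FLAG_VALID     (1ull << 0)

	uint64_t make_pte(uint64_t dma_addr, int cont)
	{
		uint64_t pte = dma_addr | ENTRY_FLAG_NG | ENTRY_FLAG_AF |
			       ENTRY_FLAG_TYPE_PAGE | ENTRY_FLAG_VALID;

		if (cont)
			pte |= ENTRY_FLAG_CONT; /* hint: the 64K run may be cached as one TLB entry */
		return pte;
	}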
@@ -36,167 +41,268 @@
static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
dma_addr_t pgd_dma;
- u64 *pgd;
- pgd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma, GFP_KERNEL);
- if (!pgd)
+ pgtable->pgd_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma,
+ GFP_KERNEL);
+ if (!pgtable->pgd_dma_ptr)
return -ENOMEM;
- pgtable->pgd = pgd;
pgtable->pgd_dma = pgd_dma;
return 0;
}
-static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
+static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
- int pgd_index, pmd_index;
+ if (cpu_addr)
+ dma_free_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, cpu_addr,
+ dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+}
+
+static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
+{
+ int pgd_idx, pud_idx, pmd_idx;
+ dma_addr_t pud_dma, pmd_dma, pte_dma;
+ u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;
- for (pgd_index = 0; pgd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_index) {
- u64 **pmd_entries = pgtable->pgd_cpu_entries[pgd_index];
- u64 *pmd = pgtable->pgd_entries[pgd_index];
+ for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
+ pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
+ pud_dma = pgtable->pgd_dma_ptr[pgd_idx];
- if (!pmd_entries)
+ if (!pud_dma_ptr)
continue;
- for (pmd_index = 0; pmd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_index) {
- if (pmd_entries[pmd_index])
- dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE,
- pmd_entries[pmd_index],
- pmd[pmd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+ for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
+ pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
+ pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];
+
+ if (!pmd_dma_ptr)
+ continue;
+
+ for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
+ pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
+ pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];
+
+ ivpu_mmu_pgtable_free(vdev, pte_dma_ptr, pte_dma);
+ }
+
+ kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
+ ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
}
- kfree(pmd_entries);
- dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd_entries[pgd_index],
- pgtable->pgd[pgd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+ kfree(pgtable->pmd_ptrs[pgd_idx]);
+ kfree(pgtable->pte_ptrs[pgd_idx]);
+ ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
}
- dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd,
- pgtable->pgd_dma & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+ ivpu_mmu_pgtable_free(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
+}
+
+static u64*
+ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
+{
+ u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
+ dma_addr_t pud_dma;
+
+ if (pud_dma_ptr)
+ return pud_dma_ptr;
+
+ pud_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pud_dma, GFP_KERNEL);
+ if (!pud_dma_ptr)
+ return NULL;
+
+ drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
+ pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
+ if (!pgtable->pmd_ptrs[pgd_idx])
+ goto err_free_pud_dma_ptr;
+
+ drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
+ pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
+ if (!pgtable->pte_ptrs[pgd_idx])
+ goto err_free_pmd_ptrs;
+
+ pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
+ pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;
+
+ return pud_dma_ptr;
+
+err_free_pmd_ptrs:
+ kfree(pgtable->pmd_ptrs[pgd_idx]);
+
+err_free_pud_dma_ptr:
+ ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
+ return NULL;
}
static u64*
-ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, u64 pgd_index)
+ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
+ int pud_idx)
{
- u64 **pmd_entries;
+ u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
dma_addr_t pmd_dma;
- u64 *pmd;
- if (pgtable->pgd_entries[pgd_index])
- return pgtable->pgd_entries[pgd_index];
+ if (pmd_dma_ptr)
+ return pmd_dma_ptr;
- pmd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
- if (!pmd)
+ pmd_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
+ if (!pmd_dma_ptr)
return NULL;
- pmd_entries = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
- if (!pmd_entries)
- goto err_free_pgd;
+ drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
+ pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
+ if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
+ goto err_free_pmd_dma_ptr;
- pgtable->pgd_entries[pgd_index] = pmd;
- pgtable->pgd_cpu_entries[pgd_index] = pmd_entries;
- pgtable->pgd[pgd_index] = pmd_dma | IVPU_MMU_ENTRY_VALID;
+ pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
+ pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;
- return pmd;
+ return pmd_dma_ptr;
-err_free_pgd:
- dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pmd, pmd_dma);
+err_free_pmd_dma_ptr:
+ ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
return NULL;
}
static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
- int pgd_index, int pmd_index)
+ int pgd_idx, int pud_idx, int pmd_idx)
{
+ u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
dma_addr_t pte_dma;
- u64 *pte;
- if (pgtable->pgd_cpu_entries[pgd_index][pmd_index])
- return pgtable->pgd_cpu_entries[pgd_index][pmd_index];
+ if (pte_dma_ptr)
+ return pte_dma_ptr;
- pte = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
- if (!pte)
+ pte_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
+ if (!pte_dma_ptr)
return NULL;
- pgtable->pgd_cpu_entries[pgd_index][pmd_index] = pte;
- pgtable->pgd_entries[pgd_index][pmd_index] = pte_dma | IVPU_MMU_ENTRY_VALID;
+ pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
+ pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;
- return pte;
+ return pte_dma_ptr;
}
static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
- u64 vpu_addr, dma_addr_t dma_addr, int prot)
+ u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
u64 *pte;
- int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
- int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
- int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+ int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+ int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
+ int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+ int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+
+ /* Allocate PUD - second level page table if needed */
+ if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
+ return -ENOMEM;
- /* Allocate PMD - second level page table if needed */
- if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_index))
+ /* Allocate PMD - third level page table if needed */
+ if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
return -ENOMEM;
- /* Allocate PTE - third level page table if needed */
- pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_index, pmd_index);
+ /* Allocate PTE - fourth level page table if needed */
+ pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
if (!pte)
return -ENOMEM;
- /* Update PTE - third level page table with DMA address */
- pte[pte_index] = dma_addr | prot;
+ /* Update PTE */
+ pte[pte_idx] = dma_addr | prot;
+
+ return 0;
+}
+
+static int
+ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
+ dma_addr_t dma_addr, u64 prot)
+{
+ size_t size = IVPU_MMU_CONT_PAGES_SIZE;
+
+ drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
+ drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));
+
+ prot |= IVPU_MMU_ENTRY_FLAG_CONT;
+
+ while (size) {
+ int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
+
+ if (ret)
+ return ret;
+
+ size -= IVPU_MMU_PAGE_SIZE;
+ vpu_addr += IVPU_MMU_PAGE_SIZE;
+ dma_addr += IVPU_MMU_PAGE_SIZE;
+ }
return 0;
}
static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
- int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
- int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
- int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+ int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+ int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
+ int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+ int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
/* Update PTE with dummy physical address and clear flags */
- ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index][pte_index] = IVPU_MMU_ENTRY_INVALID;
+ ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}
static void
ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
+ struct ivpu_mmu_pgtable *pgtable = &ctx->pgtable;
u64 end_addr = vpu_addr + size;
- u64 *pgd = ctx->pgtable.pgd;
/* Align to PMD entry (2 MB) */
vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1);
while (vpu_addr < end_addr) {
- int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
- u64 pmd_end = (pgd_index + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;
- u64 *pmd = ctx->pgtable.pgd_entries[pgd_index];
-
- while (vpu_addr < end_addr && vpu_addr < pmd_end) {
- int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
- u64 *pte = ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index];
-
- clflush_cache_range(pte, IVPU_MMU_PGTABLE_SIZE);
- vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
+ int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+ u64 pud_end = (pgd_idx + 1) * (u64)IVPU_MMU_PUD_MAP_SIZE;
+
+ while (vpu_addr < end_addr && vpu_addr < pud_end) {
+ int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
+ u64 pmd_end = (pud_idx + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;
+
+ while (vpu_addr < end_addr && vpu_addr < pmd_end) {
+ int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+
+ clflush_cache_range(pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx],
+ IVPU_MMU_PGTABLE_SIZE);
+ vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
+ }
+ clflush_cache_range(pgtable->pmd_ptrs[pgd_idx][pud_idx],
+ IVPU_MMU_PGTABLE_SIZE);
}
- clflush_cache_range(pmd, IVPU_MMU_PGTABLE_SIZE);
+ clflush_cache_range(pgtable->pud_ptrs[pgd_idx], IVPU_MMU_PGTABLE_SIZE);
}
- clflush_cache_range(pgd, IVPU_MMU_PGTABLE_SIZE);
+ clflush_cache_range(pgtable->pgd_dma_ptr, IVPU_MMU_PGTABLE_SIZE);
}
static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
- u64 vpu_addr, dma_addr_t dma_addr, size_t size, int prot)
+ u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
+ int map_size;
+ int ret;
+
while (size) {
- int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
+ if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
+ IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
+ ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
+ map_size = IVPU_MMU_CONT_PAGES_SIZE;
+ } else {
+ ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
+ map_size = IVPU_MMU_PAGE_SIZE;
+ }
if (ret)
return ret;
- vpu_addr += IVPU_MMU_PAGE_SIZE;
- dma_addr += IVPU_MMU_PAGE_SIZE;
- size -= IVPU_MMU_PAGE_SIZE;
+ vpu_addr += map_size;
+ dma_addr += map_size;
+ size -= map_size;
}
return 0;
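
ivpu_mmu_context_map_pages now picks the largest step each iteration: 64 KiB when contiguous pages are enabled and both addresses are 64 KiB aligned, otherwise a single 4 KiB page. A sketch of that selection (pick_map_size is a made-up helper name); note how OR-ing the two addresses lets one test check both alignments, as in IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE_4K  0x1000ULL
    #define CONT_SIZE_64K 0x10000ULL

    static uint64_t pick_map_size(uint64_t vpu_addr, uint64_t dma_addr, uint64_t size)
    {
        if (size >= CONT_SIZE_64K && !((vpu_addr | dma_addr) & (CONT_SIZE_64K - 1)))
            return CONT_SIZE_64K;
        return PAGE_SIZE_4K;
    }

    int main(void)
    {
        /* Both addresses aligned and the remainder large enough -> 64 KiB step. */
        printf("%#llx\n", (unsigned long long)
               pick_map_size(0x80000000ULL, 0x100000000ULL, 0x20000));
        /* Misaligned VPU address -> falls back to a 4 KiB step. */
        printf("%#llx\n", (unsigned long long)
               pick_map_size(0x80001000ULL, 0x100000000ULL, 0x20000));
        return 0;
    }
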
@@ -216,8 +322,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
struct scatterlist *sg;
- int prot;
int ret;
+ u64 prot;
u64 i;
if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
@@ -237,7 +343,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
mutex_lock(&ctx->lock);
for_each_sgtable_dma_sg(sgt, sg, i) {
- u64 dma_addr = sg_dma_address(sg) - sg->offset;
+ dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
size_t size = sg_dma_len(sg) + sg->offset;
ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
@@ -293,8 +399,14 @@ ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
{
lockdep_assert_held(&ctx->lock);
- return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE,
- 0, range->start, range->end, DRM_MM_INSERT_BEST);
+ if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
+ if (!drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
+ range->start, range->end, DRM_MM_INSERT_BEST))
+ return 0;
+ }
+
+ return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
+ range->start, range->end, DRM_MM_INSERT_BEST);
}
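
The allocator change is a try-then-fallback: first request 64 KiB alignment so large buffers stay eligible for contiguous-page mapping, then retry with plain 4 KiB alignment. A sketch of the pattern, with alloc_range() as a stand-in for drm_mm_insert_node_in_range() (here it only succeeds for 4 KiB alignment, forcing the fallback):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE_4K  0x1000ULL
    #define CONT_SIZE_64K 0x10000ULL

    static int alloc_range(uint64_t size, uint64_t align)
    {
        (void)size;
        return align > PAGE_SIZE_4K ? -1 : 0;
    }

    static int insert_node(uint64_t size)
    {
        /* Prefer 64 KiB alignment; fall back rather than fail. */
        if (size >= CONT_SIZE_64K && !alloc_range(size, CONT_SIZE_64K))
            return 0;
        return alloc_range(size, PAGE_SIZE_4K);
    }

    int main(void)
    {
        printf("insert: %d\n", insert_node(0x40000)); /* 0, via the fallback */
        return 0;
    }
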
void
@@ -319,11 +431,11 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3
return ret;
if (!context_id) {
- start = vdev->hw->ranges.global_low.start;
- end = vdev->hw->ranges.global_high.end;
+ start = vdev->hw->ranges.global.start;
+ end = vdev->hw->ranges.shave.end;
} else {
- start = vdev->hw->ranges.user_low.start;
- end = vdev->hw->ranges.user_high.end;
+ start = vdev->hw->ranges.user.start;
+ end = vdev->hw->ranges.dma.end;
}
drm_mm_init(&ctx->mm, start, end - start);
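
The renamed ranges drop the old low/high split: the global (kernel) context now spans from the start of the global range to the end of the SHAVE range, while user contexts span from the user range to the end of the DMA range. A sketch with placeholder addresses (the real values come from the per-generation HW tables behind vdev->hw->ranges):

    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t start, end; };

    int main(void)
    {
        /* Placeholder numbers, for shape only. */
        struct range global = { 0x80000000ULL,  0x180000000ULL };
        struct range shave  = { 0x180000000ULL, 0x200000000ULL };
        struct range user   = { 0xc0000000ULL,  0x1c0000000ULL };
        struct range dma    = { 0x200000000ULL, 0x280000000ULL };
        uint32_t context_id = 0; /* 0 selects the global context */

        uint64_t start = context_id ? user.start : global.start;
        uint64_t end   = context_id ? dma.end    : shave.end;

        printf("ctx %u manages %#llx-%#llx\n", context_id,
               (unsigned long long)start, (unsigned long long)end);
        return 0;
    }
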
@@ -334,11 +446,15 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3
static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
- drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd);
+ if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
+ return;
mutex_destroy(&ctx->lock);
- ivpu_mmu_pgtable_free(vdev, &ctx->pgtable);
+ ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
drm_mm_takedown(&ctx->mm);
+
+ ctx->pgtable.pgd_dma_ptr = NULL;
+ ctx->pgtable.pgd_dma = 0;
}
int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
index ddf11b95023a..961a0d6a6c7f 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.h
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -12,12 +12,13 @@ struct ivpu_device;
struct ivpu_file_priv;
struct ivpu_addr_range;
-#define IVPU_MMU_PGTABLE_ENTRIES 512
+#define IVPU_MMU_PGTABLE_ENTRIES 512ull
struct ivpu_mmu_pgtable {
- u64 **pgd_cpu_entries[IVPU_MMU_PGTABLE_ENTRIES];
- u64 *pgd_entries[IVPU_MMU_PGTABLE_ENTRIES];
- u64 *pgd;
+ u64 ***pte_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
+ u64 **pmd_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
+ u64 *pud_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
+ u64 *pgd_dma_ptr;
dma_addr_t pgd_dma;
};
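
The struct now mirrors all four levels: pgd_dma_ptr is the CPU mapping of the DMA-visible root table, and the *_ptrs arrays are CPU-side shadows used to walk child tables without translating DMA addresses back. A user-space sketch of the same shape, allocating one path through the hierarchy (error handling and cleanup omitted for brevity):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ENTRIES 512 /* IVPU_MMU_PGTABLE_ENTRIES */

    int main(void)
    {
        /* The driver reaches a leaf as pgtable->pte_ptrs[pgd][pud][pmd][pte]. */
        uint64_t ****pte_ptrs = calloc(ENTRIES, sizeof(*pte_ptrs));

        pte_ptrs[1] = calloc(ENTRIES, sizeof(*pte_ptrs[1]));
        pte_ptrs[1][2] = calloc(ENTRIES, sizeof(*pte_ptrs[1][2]));
        pte_ptrs[1][2][3] = calloc(ENTRIES, sizeof(uint64_t));

        pte_ptrs[1][2][3][4] = 0xdeadbeefULL;
        printf("pte = %#llx\n", (unsigned long long)pte_ptrs[1][2][3][4]);
        return 0;
    }
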
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index aa4d56dc52b3..e6f27daf5560 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -259,6 +259,7 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
pm_runtime_get_sync(vdev->drm.dev);
ivpu_dbg(vdev, PM, "Pre-reset..\n");
+ atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->in_reset, 1);
ivpu_shutdown(vdev);
ivpu_pm_prepare_cold_boot(vdev);
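
The pre-reset callback now bumps reset_counter alongside setting in_reset. This hunk does not show a consumer; one plausible use is sampling the counter around an operation to detect an intervening reset, sketched below with C11 atomics standing in for the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int reset_counter; /* hypothetical mirror of pm->reset_counter */

    int main(void)
    {
        int before = atomic_load(&reset_counter);

        atomic_fetch_add(&reset_counter, 1); /* a reset happens meanwhile */

        if (atomic_load(&reset_counter) != before)
            printf("device was reset; cached state must be rebuilt\n");
        return 0;
    }
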
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index baca98187255..fd4eada1290f 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -14,6 +14,7 @@ struct ivpu_pm_info {
struct ivpu_device *vdev;
struct work_struct recovery_work;
atomic_t in_reset;
+ atomic_t reset_counter;
bool is_warmboot;
u32 suspend_reschedule_counter;
};