Diffstat (limited to 'drivers/crypto/intel/qat')
-rw-r--r--  drivers/crypto/intel/qat/Kconfig | 11
-rw-r--r--  drivers/crypto/intel/qat/Makefile | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/Makefile | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 528
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h | 55
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_drv.c | 202
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 339
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 52
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 277
-rw-r--r--  drivers/crypto/intel/qat/qat_common/Makefile | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 16
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_engine.c | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_admin.c | 37
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_admin.h | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_common.h | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_services.c | 27
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_services.h | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_dbgfs.c | 3
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_fw_config.h | 18
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_config.c | 287
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_config.h | 11
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 238
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 87
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c | 153
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h | 158
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_init.c | 12
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_rl.c | 7
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_rl.h | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 6
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c | 7
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c | 8
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_telemetry.c | 288
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_telemetry.h | 99
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c | 710
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h | 117
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_hw.h | 14
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_hal.c | 6
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_uclo.c | 1
40 files changed, 3241 insertions, 566 deletions
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 1220cc86f910..c120f6715a09 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -59,6 +59,17 @@ config CRYPTO_DEV_QAT_4XXX
To compile this as a module, choose M here: the module
will be called qat_4xxx.
+config CRYPTO_DEV_QAT_420XX
+ tristate "Support for Intel(R) QAT_420XX"
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) QuickAssist Technology QAT_420xx
+ for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_420xx.
+
config CRYPTO_DEV_QAT_DH895xCCVF
tristate "Support for Intel(R) DH895xCC Virtual Function"
depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile
index 258c8a626ce0..235b69f4f3f7 100644
--- a/drivers/crypto/intel/qat/Makefile
+++ b/drivers/crypto/intel/qat/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile
new file mode 100644
index 000000000000..a90fbe00b3c8
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_420xx/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o
+qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
new file mode 100644
index 000000000000..a87d29ae724f
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/iopoll.h>
+#include <adf_accel_devices.h>
+#include <adf_admin.h>
+#include <adf_cfg.h>
+#include <adf_cfg_services.h>
+#include <adf_clock.h>
+#include <adf_common_drv.h>
+#include <adf_fw_config.h>
+#include <adf_gen4_config.h>
+#include <adf_gen4_dc.h>
+#include <adf_gen4_hw_data.h>
+#include <adf_gen4_pfvf.h>
+#include <adf_gen4_pm.h>
+#include <adf_gen4_ras.h>
+#include <adf_gen4_timer.h>
+#include <adf_gen4_tl.h>
+#include "adf_420xx_hw_data.h"
+#include "icp_qat_hw.h"
+
+#define ADF_AE_GROUP_0 GENMASK(3, 0)
+#define ADF_AE_GROUP_1 GENMASK(7, 4)
+#define ADF_AE_GROUP_2 GENMASK(11, 8)
+#define ADF_AE_GROUP_3 GENMASK(15, 12)
+#define ADF_AE_GROUP_4 BIT(16)
+
+#define ENA_THD_MASK_ASYM GENMASK(1, 0)
+#define ENA_THD_MASK_SYM GENMASK(3, 0)
+#define ENA_THD_MASK_DC GENMASK(1, 0)
+
+static const char * const adf_420xx_fw_objs[] = {
+ [ADF_FW_SYM_OBJ] = ADF_420XX_SYM_OBJ,
+ [ADF_FW_ASYM_OBJ] = ADF_420XX_ASYM_OBJ,
+ [ADF_FW_DC_OBJ] = ADF_420XX_DC_OBJ,
+ [ADF_FW_ADMIN_OBJ] = ADF_420XX_ADMIN_OBJ,
+};
+
+static const struct adf_fw_config adf_fw_cy_config[] = {
+ {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_dc_config[] = {
+ {ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_sym_config[] = {
+ {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_asym_config[] = {
+ {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_asym_dc_config[] = {
+ {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_sym_dc_config[] = {
+ {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_dcc_config[] = {
+ {ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+
+static struct adf_hw_device_class adf_420xx_class = {
+ .name = ADF_420XX_DEVICE_NAME,
+ .type = DEV_420XX,
+ .instances = 0,
+};
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+ u32 me_disable = self->fuses;
+
+ return ~me_disable & ADF_420XX_ACCELENGINES_MASK;
+}
+
+static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
+{
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ case SVC_CY2:
+ return ARRAY_SIZE(adf_fw_cy_config);
+ case SVC_DC:
+ return ARRAY_SIZE(adf_fw_dc_config);
+ case SVC_DCC:
+ return ARRAY_SIZE(adf_fw_dcc_config);
+ case SVC_SYM:
+ return ARRAY_SIZE(adf_fw_sym_config);
+ case SVC_ASYM:
+ return ARRAY_SIZE(adf_fw_asym_config);
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return ARRAY_SIZE(adf_fw_asym_dc_config);
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return ARRAY_SIZE(adf_fw_sym_dc_config);
+ default:
+ return 0;
+ }
+}
+
+static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
+{
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ case SVC_CY2:
+ return adf_fw_cy_config;
+ case SVC_DC:
+ return adf_fw_dc_config;
+ case SVC_DCC:
+ return adf_fw_dcc_config;
+ case SVC_SYM:
+ return adf_fw_sym_config;
+ case SVC_ASYM:
+ return adf_fw_asym_config;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return adf_fw_asym_dc_config;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return adf_fw_sym_dc_config;
+ default:
+ return NULL;
+ }
+}
+
+static void update_ae_mask(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ const struct adf_fw_config *fw_config;
+ u32 config_ae_mask = 0;
+ u32 ae_mask, num_objs;
+ int i;
+
+ ae_mask = get_ae_mask(hw_data);
+
+ /* Modify the AE mask based on the firmware configuration loaded */
+ fw_config = get_fw_config(accel_dev);
+ num_objs = uof_get_num_objs(accel_dev);
+
+ config_ae_mask |= ADF_420XX_ADMIN_AE_MASK;
+ for (i = 0; i < num_objs; i++)
+ config_ae_mask |= fw_config[i].ae_mask;
+
+ hw_data->ae_mask = ae_mask & config_ae_mask;
+}
+
+static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+ u32 capabilities_sym, capabilities_asym, capabilities_dc;
+ struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+ u32 capabilities_dcc;
+ u32 fusectl1;
+
+ /* As a side effect, update ae_mask based on configuration */
+ update_ae_mask(accel_dev);
+
+ /* Read accelerator capabilities mask */
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1);
+
+ capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_SHA3 |
+ ICP_ACCEL_CAPABILITIES_SHA3_EXT |
+ ICP_ACCEL_CAPABILITIES_HKDF |
+ ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+ ICP_ACCEL_CAPABILITIES_SM3 |
+ ICP_ACCEL_CAPABILITIES_SM4 |
+ ICP_ACCEL_CAPABILITIES_AES_V2 |
+ ICP_ACCEL_CAPABILITIES_ZUC |
+ ICP_ACCEL_CAPABILITIES_ZUC_256 |
+ ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT |
+ ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;
+
+ /* A set bit in fusectl1 means the feature is OFF in this SKU */
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE)
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+
+ capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_SM2 |
+ ICP_ACCEL_CAPABILITIES_ECEDMONT;
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) {
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+ }
+
+ capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) {
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+ }
+
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ case SVC_CY2:
+ return capabilities_sym | capabilities_asym;
+ case SVC_DC:
+ return capabilities_dc;
+ case SVC_DCC:
+ /*
+ * Sym capabilities are available for chaining operations,
+ * but sym crypto instances cannot be supported
+ */
+ capabilities_dcc = capabilities_dc | capabilities_sym;
+ capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ return capabilities_dcc;
+ case SVC_SYM:
+ return capabilities_sym;
+ case SVC_ASYM:
+ return capabilities_asym;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return capabilities_asym | capabilities_dc;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return capabilities_sym | capabilities_dc;
+ default:
+ return 0;
+ }
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+ if (adf_gen4_init_thd2arb_map(accel_dev))
+ dev_warn(&GET_DEV(accel_dev),
+ "Generate of the thread to arbiter map failed");
+
+ return GET_HW_DATA(accel_dev)->thd_to_arb_map;
+}
+
+static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
+{
+ rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET;
+ rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET;
+ rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET;
+ rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET;
+ rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET;
+
+ rl_data->pcie_scale_div = ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV;
+ rl_data->pcie_scale_mul = ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL;
+ rl_data->dcpr_correction = ADF_420XX_RL_DCPR_CORRECTION;
+ rl_data->max_tp[ADF_SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[ADF_SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM;
+ rl_data->max_tp[ADF_SVC_DC] = ADF_420XX_RL_MAX_TP_DC;
+ rl_data->scan_interval = ADF_420XX_RL_SCANS_PER_SEC;
+ rl_data->scale_ref = ADF_420XX_RL_SLICE_REF;
+}
+
+static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask)
+{
+ switch (ae_mask) {
+ case ADF_AE_GROUP_0:
+ return RP_GROUP_0;
+ case ADF_AE_GROUP_1:
+ case ADF_AE_GROUP_3:
+ return RP_GROUP_1;
+ case ADF_AE_GROUP_2:
+ if (get_fw_config(accel_dev) == adf_fw_cy_config)
+ return RP_GROUP_0;
+ else
+ return RP_GROUP_1;
+ default:
+ dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized");
+ return -EINVAL;
+ }
+}
+
+static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ switch (fw_config[obj_num].obj) {
+ case ADF_FW_ASYM_OBJ:
+ return ENA_THD_MASK_ASYM;
+ case ADF_FW_SYM_OBJ:
+ return ENA_THD_MASK_SYM;
+ case ADF_FW_DC_OBJ:
+ return ENA_THD_MASK_DC;
+ default:
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+ }
+}
+
+static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
+{
+ enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
+ const struct adf_fw_config *fw_config;
+ u16 ring_to_svc_map;
+ int i, j;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return 0;
+
+ for (i = 0; i < RP_GROUP_COUNT; i++) {
+ switch (fw_config[i].ae_mask) {
+ case ADF_AE_GROUP_0:
+ j = RP_GROUP_0;
+ break;
+ case ADF_AE_GROUP_1:
+ j = RP_GROUP_1;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (fw_config[i].obj) {
+ case ADF_FW_SYM_OBJ:
+ rps[j] = SYM;
+ break;
+ case ADF_FW_ASYM_OBJ:
+ rps[j] = ASYM;
+ break;
+ case ADF_FW_DC_OBJ:
+ rps[j] = COMP;
+ break;
+ default:
+ rps[j] = 0;
+ break;
+ }
+ }
+
+ ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
+ rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
+
+ return ring_to_svc_map;
+}
+
+static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
+ const char * const fw_objs[], int num_objs)
+{
+ const struct adf_fw_config *fw_config;
+ int id;
+
+ fw_config = get_fw_config(accel_dev);
+ if (fw_config)
+ id = fw_config[obj_num].obj;
+ else
+ id = -EINVAL;
+
+ if (id < 0 || id > num_objs)
+ return NULL;
+
+ return fw_objs[id];
+}
+
+static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ int num_fw_objs = ARRAY_SIZE(adf_420xx_fw_objs);
+
+ return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs);
+}
+
+static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return 0;
+
+ return fw_config[obj_num].ae_mask;
+}
+
+static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask)
+{
+ dev_err_mask->cppagentcmdpar_mask = ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK;
+ dev_err_mask->parerr_ath_cph_mask = ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK;
+ dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK;
+ dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK;
+ dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK;
+ dev_err_mask->parerr_wat_wcp_mask = ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK;
+ dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK;
+}
+
+void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
+{
+ hw_data->dev_class = &adf_420xx_class;
+ hw_data->instance_id = adf_420xx_class.instances++;
+ hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS;
+ hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF;
+ hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS;
+ hw_data->num_engines = ADF_420XX_MAX_ACCELENGINES;
+ hw_data->num_logical_accel = 1;
+ hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
+ hw_data->alloc_irq = adf_isr_resource_alloc;
+ hw_data->free_irq = adf_isr_resource_free;
+ hw_data->enable_error_correction = adf_gen4_enable_error_correction;
+ hw_data->get_accel_mask = adf_gen4_get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = adf_gen4_get_num_accels;
+ hw_data->get_num_aes = adf_gen4_get_num_aes;
+ hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id;
+ hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id;
+ hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id;
+ hw_data->get_arb_info = adf_gen4_get_arb_info;
+ hw_data->get_admin_info = adf_gen4_get_admin_info;
+ hw_data->get_accel_cap = get_accel_cap;
+ hw_data->get_sku = adf_gen4_get_sku;
+ hw_data->init_admin_comms = adf_init_admin_comms;
+ hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->send_admin_init = adf_send_admin_init;
+ hw_data->init_arb = adf_init_arb;
+ hw_data->exit_arb = adf_exit_arb;
+ hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+ hw_data->enable_ints = adf_gen4_enable_ints;
+ hw_data->init_device = adf_gen4_init_device;
+ hw_data->reset_device = adf_reset_flr;
+ hw_data->admin_ae_mask = ADF_420XX_ADMIN_AE_MASK;
+ hw_data->num_rps = ADF_GEN4_MAX_RPS;
+ hw_data->fw_name = ADF_420XX_FW;
+ hw_data->fw_mmp_name = ADF_420XX_MMP;
+ hw_data->uof_get_name = uof_get_name_420xx;
+ hw_data->uof_get_num_objs = uof_get_num_objs;
+ hw_data->uof_get_ae_mask = uof_get_ae_mask;
+ hw_data->get_rp_group = get_rp_group;
+ hw_data->get_ena_thd_mask = get_ena_thd_mask;
+ hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
+ hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
+ hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+ hw_data->enable_pm = adf_gen4_enable_pm;
+ hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
+ hw_data->dev_config = adf_gen4_dev_config;
+ hw_data->start_timer = adf_gen4_timer_start;
+ hw_data->stop_timer = adf_gen4_timer_stop;
+ hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->clock_frequency = ADF_420XX_AE_FREQ;
+
+ adf_gen4_set_err_mask(&hw_data->dev_err_mask);
+ adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+ adf_gen4_init_dc_ops(&hw_data->dc_ops);
+ adf_gen4_init_ras_ops(&hw_data->ras_ops);
+ adf_gen4_init_tl_data(&hw_data->tl_data);
+ adf_init_rl_data(&hw_data->rl_data);
+}
+
+void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class->instances--;
+}
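
Note: in get_ring_to_svc_map() above, only ADF_AE_GROUP_0 and ADF_AE_GROUP_1 resolve to ring-pair groups, so the map is non-trivial only for configurations whose first two fw_config entries use those groups (e.g. the "dc" and "dcc" layouts). A minimal worked example for "dcc", assuming the service-type values (COMP = 2, SYM = 3) and the 3-bit ring-pair shifts from adf_cfg_common.h:

    /* Illustrative sketch, not part of the patch: "dcc" yields
     * rps[RP_GROUP_0] = SYM and rps[RP_GROUP_1] = COMP, so ring
     * pairs 0-3 alternate SYM/COMP/SYM/COMP. */
    enum { RP0 = 0, RP1 = 3, RP2 = 6, RP3 = 9 };    /* assumed shifts */
    unsigned short map = (3 << RP0) | (2 << RP1) |
                         (3 << RP2) | (2 << RP3);   /* == 0x4d3 */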
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h
new file mode 100644
index 000000000000..99abbfc14820
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_420XX_HW_DATA_H_
+#define ADF_420XX_HW_DATA_H_
+
+#include <adf_accel_devices.h>
+
+#define ADF_420XX_MAX_ACCELENGINES 17
+
+#define ADF_420XX_ACCELENGINES_MASK 0x1FFFF
+#define ADF_420XX_ADMIN_AE_MASK 0x10000
+
+#define ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK (0xFF)
+#define ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK (0xFF00FF)
+#define ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK (0x10001)
+#define ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK (0xF0007)
+#define ADF_420XX_PARITYERRORMASK_PKE_MASK (0xFFF)
+#define ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK (0x3FF03FF)
+
+/*
+ * SSMFEATREN bit mask
+ * BIT(4) - enables parity detection on CPP
+ * BIT(12) - enables the logging of push/pull data errors
+ * in pperr register
+ * BIT(16) - BIT(27) - enable parity detection on SPPs
+ */
+#define ADF_420XX_SSMFEATREN_MASK \
+ (BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \
+ BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27))
+
+/* Firmware Binaries */
+#define ADF_420XX_FW "qat_420xx.bin"
+#define ADF_420XX_MMP "qat_420xx_mmp.bin"
+#define ADF_420XX_SYM_OBJ "qat_420xx_sym.bin"
+#define ADF_420XX_DC_OBJ "qat_420xx_dc.bin"
+#define ADF_420XX_ASYM_OBJ "qat_420xx_asym.bin"
+#define ADF_420XX_ADMIN_OBJ "qat_420xx_admin.bin"
+
+/* RL constants */
+#define ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV 100
+#define ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL 102
+#define ADF_420XX_RL_DCPR_CORRECTION 1
+#define ADF_420XX_RL_SCANS_PER_SEC 954
+#define ADF_420XX_RL_MAX_TP_ASYM 173750UL
+#define ADF_420XX_RL_MAX_TP_SYM 95000UL
+#define ADF_420XX_RL_MAX_TP_DC 40000UL
+#define ADF_420XX_RL_SLICE_REF 1000UL
+
+/* Clocks frequency */
+#define ADF_420XX_AE_FREQ (1000 * HZ_PER_MHZ)
+
+void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id);
+void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data);
+
+#endif
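
Note: the twelve SPP bits of ADF_420XX_SSMFEATREN_MASK are contiguous, so an equivalent, more compact spelling exists (illustrative only, not what the patch defines):

    /* Equivalent by inspection: BIT(16)..BIT(27) == GENMASK(27, 16) */
    #define ADF_420XX_SSMFEATREN_MASK_ALT \
            (BIT(4) | BIT(12) | GENMASK(27, 16))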
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
new file mode 100644
index 000000000000..2a3598409eeb
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <adf_accel_devices.h>
+#include <adf_gen4_hw_data.h>
+#include <adf_gen4_config.h>
+#include <adf_cfg.h>
+#include <adf_common_drv.h>
+#include <adf_dbgfs.h>
+
+#include "adf_420xx_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+ if (accel_dev->hw_device) {
+ adf_clean_hw_data_420xx(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
+ }
+ adf_dbgfs_exit(accel_dev);
+ adf_cfg_dev_remove(accel_dev);
+ adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ unsigned int i, bar_nr;
+ unsigned long bar_mask;
+ struct adf_bar *bar;
+ int ret;
+
+ if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+ /*
+ * If the accelerator is connected to a node with no memory
+ * there is no point in using the accelerator since the remote
+ * memory transaction will be very slow.
+ */
+ dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+ return -EINVAL;
+ }
+
+ accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL);
+ if (!accel_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+
+ /*
+ * Add accel device to accel table
+ * This should be called before adf_cleanup_accel is called
+ */
+ if (adf_devmgr_add_dev(accel_dev, NULL)) {
+ dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+ return -EFAULT;
+ }
+
+ accel_dev->owner = THIS_MODULE;
+ /* Allocate and initialise device hardware meta-data structure */
+ hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL);
+ if (!hw_data) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ accel_dev->hw_device = hw_data;
+ adf_init_hw_data_420xx(accel_dev->hw_device, ent->device);
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses);
+
+ /* Get Accelerators and Accelerators Engines masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+ /* If the device has no acceleration engines then ignore it */
+ if (!hw_data->accel_mask || !hw_data->ae_mask ||
+ (~hw_data->ae_mask & 0x01)) {
+ dev_err(&pdev->dev, "No acceleration units found.\n");
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ goto out_err;
+
+ /* Enable PCI device */
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable PCI device.\n");
+ goto out_err;
+ }
+
+ /* Set DMA identifier */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration.\n");
+ goto out_err;
+ }
+
+ ret = adf_gen4_cfg_dev_init(accel_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize configuration.\n");
+ goto out_err;
+ }
+
+ /* Get accelerator capabilities mask */
+ hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+ if (!hw_data->accel_capabilities_mask) {
+ dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ /* Find and map all the device's BARS */
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK;
+
+ ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to map pci regions.\n");
+ goto out_err;
+ }
+
+ i = 0;
+ for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
+ bar = &accel_pci_dev->pci_bars[i++];
+ bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
+ }
+
+ pci_set_master(pdev);
+
+ if (pci_save_state(pdev)) {
+ dev_err(&pdev->dev, "Failed to save pci state.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ accel_dev->ras_errors.enabled = true;
+ adf_dbgfs_init(accel_dev);
+
+ ret = adf_dev_up(accel_dev, true);
+ if (ret)
+ goto out_err_dev_stop;
+
+ ret = adf_sysfs_init(accel_dev);
+ if (ret)
+ goto out_err_dev_stop;
+
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_down(accel_dev, false);
+out_err:
+ adf_cleanup_accel(accel_dev);
+ return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Driver removal failed\n");
+ return;
+ }
+ adf_dev_down(accel_dev, false);
+ adf_cleanup_accel(accel_dev);
+}
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_420XX_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
+module_pci_driver(adf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_420XX_FW);
+MODULE_FIRMWARE(ADF_420XX_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_SOFTDEP("pre: crypto-intel_qat");
+MODULE_IMPORT_NS(CRYPTO_QAT);
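
Note: the BAR loop in adf_probe() compacts the device's sparse 64-bit BARs into consecutive pci_bars[] slots. A standalone sketch of the pattern, assuming ADF_GEN4_BAR_MASK keeps the BIT(0) | BIT(2) | BIT(4) value of the ADF_4XXX_BAR_MASK it replaces (the odd-numbered BAR registers hold the upper halves of the 64-bit BARs):

    /* Illustrative: BARs 0, 2 and 4 map to pci_bars[0], [1] and [2] */
    unsigned long bar_mask = BIT(0) | BIT(2) | BIT(4);
    unsigned int bar_nr, i = 0;

    for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS)
            accel_pci_dev->pci_bars[i++].virt_addr =
                            pcim_iomap_table(pdev)[bar_nr];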
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 0faedb5b2eb5..479062aa5e6b 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -7,12 +7,15 @@
#include <adf_cfg_services.h>
#include <adf_clock.h>
#include <adf_common_drv.h>
+#include <adf_fw_config.h>
+#include <adf_gen4_config.h>
#include <adf_gen4_dc.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include "adf_gen4_ras.h"
#include <adf_gen4_timer.h>
+#include <adf_gen4_tl.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -20,12 +23,10 @@
#define ADF_AE_GROUP_1 GENMASK(7, 4)
#define ADF_AE_GROUP_2 BIT(8)
-enum adf_fw_objs {
- ADF_FW_SYM_OBJ,
- ADF_FW_ASYM_OBJ,
- ADF_FW_DC_OBJ,
- ADF_FW_ADMIN_OBJ,
-};
+#define ENA_THD_MASK_ASYM GENMASK(1, 0)
+#define ENA_THD_MASK_ASYM_401XX GENMASK(5, 0)
+#define ENA_THD_MASK_SYM GENMASK(6, 0)
+#define ENA_THD_MASK_DC GENMASK(1, 0)
static const char * const adf_4xxx_fw_objs[] = {
[ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ,
@@ -41,11 +42,6 @@ static const char * const adf_402xx_fw_objs[] = {
[ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ,
};
-struct adf_fw_config {
- u32 ae_mask;
- enum adf_fw_objs obj;
-};
-
static const struct adf_fw_config adf_fw_cy_config[] = {
{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
{ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
@@ -95,59 +91,12 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config))
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));
-/* Worker thread to service arbiter mappings */
-static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
- 0x5555555, 0x5555555, 0x5555555, 0x5555555,
- 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
- 0x0
-};
-
-static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
- 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
- 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
- 0x0
-};
-
-static const u32 thrd_to_arb_map_dcc[ADF_4XXX_MAX_ACCELENGINES] = {
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
- 0x0
-};
-
static struct adf_hw_device_class adf_4xxx_class = {
.name = ADF_4XXX_DEVICE_NAME,
.type = DEV_4XXX,
.instances = 0,
};
-static int get_service_enabled(struct adf_accel_dev *accel_dev)
-{
- char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
- int ret;
-
- ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
- ADF_SERVICES_ENABLED, services);
- if (ret) {
- dev_err(&GET_DEV(accel_dev),
- ADF_SERVICES_ENABLED " param not found\n");
- return ret;
- }
-
- ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services),
- services);
- if (ret < 0)
- dev_err(&GET_DEV(accel_dev),
- "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
- services);
-
- return ret;
-}
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_ACCELERATORS_MASK;
-}
-
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
u32 me_disable = self->fuses;
@@ -155,55 +104,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
}
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_MAX_ACCELERATORS;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
- if (!self || !self->ae_mask)
- return 0;
-
- return hweight32(self->ae_mask);
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_ETR_BAR;
-}
-
-static u32 get_sram_bar_id(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_SRAM_BAR;
-}
-
-/*
- * The vector routing table is used to select the MSI-X entry to use for each
- * interrupt source.
- * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
- * The final entry corresponds to VF2PF or error interrupts.
- * This vector table could be used to configure one MSI-X entry to be shared
- * between multiple interrupt sources.
- *
- * The default routing is set to have a one to one correspondence between the
- * interrupt source and the MSI-X entry used.
- */
-static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
-{
- void __iomem *csr;
- int i;
-
- csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
- for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
- ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
-}
-
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
@@ -212,7 +112,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
u32 fusectl1;
/* Read accelerator capabilities mask */
- pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1);
capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CIPHER |
@@ -227,27 +127,27 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_AES_V2;
/* A set bit in fusectl1 means the feature is OFF in this SKU */
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
}
@@ -257,7 +157,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_SM2 |
ICP_ACCEL_CAPABILITIES_ECEDMONT;
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) {
capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
@@ -268,14 +168,14 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) {
capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
}
- switch (get_service_enabled(accel_dev)) {
+ switch (adf_get_service_enabled(accel_dev)) {
case SVC_CY:
case SVC_CY2:
return capabilities_sym | capabilities_asym;
@@ -304,43 +204,13 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
}
}
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
- return DEV_SKU_1;
-}
-
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
- switch (get_service_enabled(accel_dev)) {
- case SVC_DC:
- return thrd_to_arb_map_dc;
- case SVC_DCC:
- return thrd_to_arb_map_dcc;
- default:
- return default_thrd_to_arb_map;
- }
-}
-
-static void get_arb_info(struct arb_info *arb_info)
-{
- arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG;
- arb_info->arb_offset = ADF_4XXX_ARB_OFFSET;
- arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
-}
+ if (adf_gen4_init_thd2arb_map(accel_dev))
+ dev_warn(&GET_DEV(accel_dev),
+ "Generate of the thread to arbiter map failed");
-static void get_admin_info(struct admin_info *admin_csrs_info)
-{
- admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
- admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
- admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
-}
-
-static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
-{
- /*
- * 4XXX uses KPT counter for HB
- */
- return ADF_4XXX_KPT_COUNTER_FREQ;
+ return GET_HW_DATA(accel_dev)->thd_to_arb_map;
}
static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
@@ -361,66 +231,14 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF;
}
-static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
-{
- struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
- void __iomem *csr = misc_bar->virt_addr;
-
- /* Enable all in errsou3 except VFLR notification on host */
- ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
-}
-
-static void adf_enable_ints(struct adf_accel_dev *accel_dev)
-{
- void __iomem *addr;
-
- addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
-
- /* Enable bundle interrupts */
- ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
- ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);
-
- /* Enable misc interrupts */
- ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
-}
-
-static int adf_init_device(struct adf_accel_dev *accel_dev)
-{
- void __iomem *addr;
- u32 status;
- u32 csr;
- int ret;
-
- addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
-
- /* Temporarily mask PM interrupt */
- csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
- csr |= ADF_GEN4_PM_SOU;
- ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
-
- /* Set DRV_ACTIVE bit to power up the device */
- ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
-
- /* Poll status register to make sure the device is powered up */
- ret = read_poll_timeout(ADF_CSR_RD, status,
- status & ADF_GEN4_PM_INIT_STATE,
- ADF_GEN4_PM_POLL_DELAY_US,
- ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
- ADF_GEN4_PM_STATUS);
- if (ret)
- dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
-
- return ret;
-}
-
-static u32 uof_get_num_objs(void)
+static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
{
return ARRAY_SIZE(adf_fw_cy_config);
}
static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
{
- switch (get_service_enabled(accel_dev)) {
+ switch (adf_get_service_enabled(accel_dev)) {
case SVC_CY:
case SVC_CY2:
return adf_fw_cy_config;
@@ -443,11 +261,64 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev
}
}
-enum adf_rp_groups {
- RP_GROUP_0 = 0,
- RP_GROUP_1,
- RP_GROUP_COUNT
-};
+static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask)
+{
+ switch (ae_mask) {
+ case ADF_AE_GROUP_0:
+ return RP_GROUP_0;
+ case ADF_AE_GROUP_1:
+ return RP_GROUP_1;
+ default:
+ dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized");
+ return -EINVAL;
+ }
+}
+
+static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ switch (fw_config[obj_num].obj) {
+ case ADF_FW_ASYM_OBJ:
+ return ENA_THD_MASK_ASYM;
+ case ADF_FW_SYM_OBJ:
+ return ENA_THD_MASK_SYM;
+ case ADF_FW_DC_OBJ:
+ return ENA_THD_MASK_DC;
+ default:
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+ }
+}
+
+static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ switch (fw_config[obj_num].obj) {
+ case ADF_FW_ASYM_OBJ:
+ return ENA_THD_MASK_ASYM_401XX;
+ case ADF_FW_SYM_OBJ:
+ return ENA_THD_MASK_SYM;
+ case ADF_FW_DC_OBJ:
+ return ENA_THD_MASK_DC;
+ default:
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+ }
+}
static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
{
@@ -553,54 +424,63 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
{
hw_data->dev_class = &adf_4xxx_class;
hw_data->instance_id = adf_4xxx_class.instances++;
- hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
- hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
- hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
- hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
+ hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS;
+ hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF;
+ hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS;
hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
hw_data->num_logical_accel = 1;
- hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
- hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
+ hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK;
hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
- hw_data->enable_error_correction = adf_enable_error_correction;
- hw_data->get_accel_mask = get_accel_mask;
+ hw_data->enable_error_correction = adf_gen4_enable_error_correction;
+ hw_data->get_accel_mask = adf_gen4_get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
- hw_data->get_num_accels = get_num_accels;
- hw_data->get_num_aes = get_num_aes;
- hw_data->get_sram_bar_id = get_sram_bar_id;
- hw_data->get_etr_bar_id = get_etr_bar_id;
- hw_data->get_misc_bar_id = get_misc_bar_id;
- hw_data->get_arb_info = get_arb_info;
- hw_data->get_admin_info = get_admin_info;
+ hw_data->get_num_accels = adf_gen4_get_num_accels;
+ hw_data->get_num_aes = adf_gen4_get_num_aes;
+ hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id;
+ hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id;
+ hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id;
+ hw_data->get_arb_info = adf_gen4_get_arb_info;
+ hw_data->get_admin_info = adf_gen4_get_admin_info;
hw_data->get_accel_cap = get_accel_cap;
- hw_data->get_sku = get_sku;
+ hw_data->get_sku = adf_gen4_get_sku;
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->send_admin_init = adf_send_admin_init;
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
- hw_data->enable_ints = adf_enable_ints;
- hw_data->init_device = adf_init_device;
+ hw_data->enable_ints = adf_gen4_enable_ints;
+ hw_data->init_device = adf_gen4_init_device;
hw_data->reset_device = adf_reset_flr;
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
+ hw_data->num_rps = ADF_GEN4_MAX_RPS;
switch (dev_id) {
case ADF_402XX_PCI_DEVICE_ID:
hw_data->fw_name = ADF_402XX_FW;
hw_data->fw_mmp_name = ADF_402XX_MMP;
hw_data->uof_get_name = uof_get_name_402xx;
break;
-
+ case ADF_401XX_PCI_DEVICE_ID:
+ hw_data->fw_name = ADF_4XXX_FW;
+ hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ hw_data->uof_get_name = uof_get_name_4xxx;
+ hw_data->get_ena_thd_mask = get_ena_thd_mask_401xx;
+ break;
default:
hw_data->fw_name = ADF_4XXX_FW;
hw_data->fw_mmp_name = ADF_4XXX_MMP;
hw_data->uof_get_name = uof_get_name_4xxx;
+ hw_data->get_ena_thd_mask = get_ena_thd_mask;
+ break;
}
hw_data->uof_get_num_objs = uof_get_num_objs;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
- hw_data->set_msix_rttable = set_msix_default_rttable;
+ hw_data->get_rp_group = get_rp_group;
+ hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
@@ -610,7 +490,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->dev_config = adf_gen4_dev_config;
hw_data->start_timer = adf_gen4_timer_start;
hw_data->stop_timer = adf_gen4_timer_stop;
- hw_data->get_hb_clock = get_heartbeat_clock;
+ hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
@@ -619,6 +499,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen4_init_dc_ops(&hw_data->dc_ops);
adf_gen4_init_ras_ops(&hw_data->ras_ops);
+ adf_gen4_init_tl_data(&hw_data->tl_data);
adf_init_rl_data(&hw_data->rl_data);
}
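
Note: for quick reference, the per-object enabled-thread masks introduced above differ between the two 4xxx variants only for ASYM objects:

    /* Summary (illustrative):
     *   object    4xxx           401xx
     *   ASYM      GENMASK(1, 0)  GENMASK(5, 0)
     *   SYM       GENMASK(6, 0)  GENMASK(6, 0)
     *   DC        GENMASK(1, 0)  GENMASK(1, 0)
     * adf_init_hw_data_4xxx() selects get_ena_thd_mask_401xx for
     * ADF_401XX_PCI_DEVICE_ID and get_ena_thd_mask otherwise; the
     * ADF_402XX_PCI_DEVICE_ID case leaves the callback unset. */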
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
index 33423295e90f..76388363ea87 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -6,25 +6,8 @@
#include <linux/units.h>
#include <adf_accel_devices.h>
-/* PCIe configuration space */
-#define ADF_4XXX_SRAM_BAR 0
-#define ADF_4XXX_PMISC_BAR 1
-#define ADF_4XXX_ETR_BAR 2
-#define ADF_4XXX_RX_RINGS_OFFSET 1
-#define ADF_4XXX_TX_RINGS_MASK 0x1
-#define ADF_4XXX_MAX_ACCELERATORS 1
#define ADF_4XXX_MAX_ACCELENGINES 9
-#define ADF_4XXX_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
-/* Physical function fuses */
-#define ADF_4XXX_FUSECTL0_OFFSET (0x2C8)
-#define ADF_4XXX_FUSECTL1_OFFSET (0x2CC)
-#define ADF_4XXX_FUSECTL2_OFFSET (0x2D0)
-#define ADF_4XXX_FUSECTL3_OFFSET (0x2D4)
-#define ADF_4XXX_FUSECTL4_OFFSET (0x2D8)
-#define ADF_4XXX_FUSECTL5_OFFSET (0x2DC)
-
-#define ADF_4XXX_ACCELERATORS_MASK (0x1)
#define ADF_4XXX_ACCELENGINES_MASK (0x1FF)
#define ADF_4XXX_ADMIN_AE_MASK (0x100)
@@ -45,28 +28,6 @@
(BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | \
BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23))
-#define ADF_4XXX_ETR_MAX_BANKS 64
-
-/* MSIX interrupt */
-#define ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET (0x41A040)
-#define ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET (0x41A044)
-#define ADF_4XXX_SMIAPF_MASK_OFFSET (0x41A084)
-#define ADF_4XXX_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04))
-
-/* Bank and ring configuration */
-#define ADF_4XXX_NUM_RINGS_PER_BANK 2
-#define ADF_4XXX_NUM_BANKS_PER_VF 4
-
-/* Arbiter configuration */
-#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
-#define ADF_4XXX_ARB_OFFSET (0x0)
-#define ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET (0x400)
-
-/* Admin Interface Reg Offset */
-#define ADF_4XXX_ADMINMSGUR_OFFSET (0x500574)
-#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578)
-#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970)
-
/* Firmware Binaries */
#define ADF_4XXX_FW "qat_4xxx.bin"
#define ADF_4XXX_MMP "qat_4xxx_mmp.bin"
@@ -93,22 +54,9 @@
#define ADF_4XXX_RL_SLICE_REF 1000UL
/* Clocks frequency */
-#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ)
#define ADF_4XXX_AE_FREQ (1000 * HZ_PER_MHZ)
-/* qat_4xxx fuse bits are different from old GENs, redefine them */
-enum icp_qat_4xxx_slice_mask {
- ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0),
- ICP_ACCEL_4XXX_MASK_AUTH_SLICE = BIT(1),
- ICP_ACCEL_4XXX_MASK_PKE_SLICE = BIT(2),
- ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3),
- ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4),
- ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5),
- ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(7),
-};
-
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id);
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
-int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
#endif
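
Note: the removed icp_qat_4xxx_slice_mask enum is superseded by the ICP_ACCEL_GEN4_MASK_* names used throughout both hw_data files; per the diffstat, its new home is icp_qat_hw.h (not shown here). A hypothetical reconstruction, assuming the bit positions carry over unchanged:

    /* Sketch only -- the verbatim header is outside this diff. The
     * 420xx-only WCP_WAT and ZUC_256 slice masks also live there, at
     * positions not visible in this hunk. */
    enum icp_qat_gen4_slice_mask {
            ICP_ACCEL_GEN4_MASK_CIPHER_SLICE   = BIT(0),
            ICP_ACCEL_GEN4_MASK_AUTH_SLICE     = BIT(1),
            ICP_ACCEL_GEN4_MASK_PKE_SLICE      = BIT(2),
            ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE = BIT(3),
            ICP_ACCEL_GEN4_MASK_UCS_SLICE      = BIT(4),
            ICP_ACCEL_GEN4_MASK_EIA3_SLICE     = BIT(5),
            ICP_ACCEL_GEN4_MASK_SMX_SLICE      = BIT(7),
    };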
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 8f483d1197dd..9762f2bf7727 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -8,13 +8,10 @@
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_dbgfs.h>
-#include <adf_heartbeat.h>
+#include <adf_gen4_config.h>
+#include <adf_gen4_hw_data.h>
#include "adf_4xxx_hw_data.h"
-#include "adf_cfg_services.h"
-#include "qat_compression.h"
-#include "qat_crypto.h"
-#include "adf_transport_access_macros.h"
static const struct pci_device_id adf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
@@ -35,270 +32,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
adf_devmgr_rm_dev(accel_dev, NULL);
}
-static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
-{
- const char *config;
- int ret;
-
- config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
-
- ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
- if (ret)
- return ret;
-
- /* Default configuration is crypto only for even devices
- * and compression for odd devices
- */
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
- ADF_SERVICES_ENABLED, config,
- ADF_STR);
- if (ret)
- return ret;
-
- adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);
-
- return 0;
-}
-
-static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
-{
- char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
- int banks = GET_MAX_BANKS(accel_dev);
- int cpus = num_online_cpus();
- unsigned long bank, val;
- int instances;
- int ret;
- int i;
-
- if (adf_hw_dev_has_crypto(accel_dev))
- instances = min(cpus, banks / 2);
- else
- instances = 0;
-
- for (i = 0; i < instances; i++) {
- val = i;
- bank = i * 2;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &bank, ADF_DEC);
- if (ret)
- goto err;
-
- bank += 1;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &bank, ADF_DEC);
- if (ret)
- goto err;
-
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
- i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
- val = 128;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 512;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 1;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 1;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = ADF_COALESCING_DEF_TIME;
- snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
- ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
- key, &val, ADF_DEC);
- if (ret)
- goto err;
- }
-
- val = i;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
- &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
- &val, ADF_DEC);
- if (ret)
- goto err;
-
- return 0;
-err:
- dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
- return ret;
-}
-
-static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
-{
- char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
- int banks = GET_MAX_BANKS(accel_dev);
- int cpus = num_online_cpus();
- unsigned long val;
- int instances;
- int ret;
- int i;
-
- if (adf_hw_dev_has_compression(accel_dev))
- instances = min(cpus, banks);
- else
- instances = 0;
-
- for (i = 0; i < instances; i++) {
- val = i;
- snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 512;
- snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 1;
- snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = ADF_COALESCING_DEF_TIME;
- snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
- ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
- key, &val, ADF_DEC);
- if (ret)
- goto err;
- }
-
- val = i;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
- &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
- &val, ADF_DEC);
- if (ret)
- goto err;
-
- return 0;
-err:
- dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
- return ret;
-}
-
-static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
-{
- unsigned long val;
- int ret;
-
- val = 0;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
- &val, ADF_DEC);
- if (ret)
- return ret;
-
- return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
- &val, ADF_DEC);
-}
-
-int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
-{
- char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
- int ret;
-
- ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
- if (ret)
- goto err;
-
- ret = adf_cfg_section_add(accel_dev, "Accelerator0");
- if (ret)
- goto err;
-
- ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
- ADF_SERVICES_ENABLED, services);
- if (ret)
- goto err;
-
- ret = sysfs_match_string(adf_cfg_services, services);
- if (ret < 0)
- goto err;
-
- switch (ret) {
- case SVC_CY:
- case SVC_CY2:
- ret = adf_crypto_dev_config(accel_dev);
- break;
- case SVC_DC:
- case SVC_DCC:
- ret = adf_comp_dev_config(accel_dev);
- break;
- default:
- ret = adf_no_dev_config(accel_dev);
- break;
- }
-
- if (ret)
- goto err;
-
- set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-
- return ret;
-
-err:
- dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
- return ret;
-}
-
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_dev *accel_dev;
@@ -348,7 +81,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
- pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses);
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
@@ -381,7 +114,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
- ret = adf_cfg_dev_init(accel_dev);
+ ret = adf_gen4_cfg_dev_init(accel_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize configuration.\n");
goto out_err;
@@ -396,7 +129,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Find and map all the device's BARS */
- bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK;
ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
if (ret) {
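The probe rework above swaps the 4xxx-specific constants and per-driver config init for their generation-wide gen4 counterparts. A condensed, hypothetical sketch of how the renamed helpers fit together, with error paths and unrelated probe steps elided (illustrative only, not the full driver code):

static int example_probe_core(struct pci_dev *pdev,
			      struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	unsigned long bar_mask;
	int ret;

	/* Fuses are now read from the generation-wide register offset. */
	pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET,
			      &hw_data->fuses);

	/* Default service configuration comes from the shared gen4 helper. */
	ret = adf_gen4_cfg_dev_init(accel_dev);
	if (ret)
		return ret;

	/* Map only the BARs this generation uses (bits 0, 2 and 4). */
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK;
	return pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
}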
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 779a8aa0b8d2..6908727bff3b 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -16,6 +16,7 @@ intel_qat-objs := adf_cfg.o \
adf_sysfs_ras_counters.o \
adf_gen2_hw_data.o \
adf_gen2_config.o \
+ adf_gen4_config.o \
adf_gen4_hw_data.o \
adf_gen4_pm.o \
adf_gen2_dc.o \
@@ -40,9 +41,12 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
adf_fw_counters.o \
adf_cnv_dbgfs.o \
adf_gen4_pm_debugfs.o \
+ adf_gen4_tl.o \
adf_heartbeat.o \
adf_heartbeat_dbgfs.o \
adf_pm_dbgfs.o \
+ adf_telemetry.o \
+ adf_tl_debugfs.o \
adf_dbgfs.o
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 4ff5729a3496..a16c7e6edc65 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -6,11 +6,14 @@
#include <linux/module.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/types.h>
#include "adf_cfg_common.h"
#include "adf_rl.h"
+#include "adf_telemetry.h"
#include "adf_pfvf_msg.h"
+#include "icp_qat_hw.h"
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
@@ -19,12 +22,15 @@
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
+#define ADF_420XX_DEVICE_NAME "420xx"
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_401XX_PCI_DEVICE_ID 0x4942
#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
#define ADF_402XX_PCI_DEVICE_ID 0x4944
#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
+#define ADF_420XX_PCI_DEVICE_ID 0x4946
+#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
@@ -92,6 +98,7 @@ enum ras_errors {
struct adf_error_counters {
atomic_t counter[ADF_RAS_ERRORS];
+ bool sysfs_added;
bool enabled;
};
@@ -240,8 +247,10 @@ struct adf_hw_device_data {
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
- u32 (*uof_get_num_objs)(void);
+ u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev);
u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
+ int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask);
+ u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
int (*dev_config)(struct adf_accel_dev *accel_dev);
struct adf_pfvf_ops pfvf_ops;
struct adf_hw_csr_ops csr_ops;
@@ -249,6 +258,7 @@ struct adf_hw_device_data {
struct adf_ras_ops ras_ops;
struct adf_dev_err_mask dev_err_mask;
struct adf_rl_hw_data rl_data;
+ struct adf_tl_hw_data tl_data;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses;
@@ -263,6 +273,7 @@ struct adf_hw_device_data {
u32 admin_ae_mask;
u16 tx_rings_mask;
u16 ring_to_svc_map;
+ u32 thd_to_arb_map[ICP_QAT_HW_AE_DELIMITER];
u8 tx_rx_gap;
u8 num_banks;
u16 num_banks_per_vf;
@@ -271,6 +282,7 @@ struct adf_hw_device_data {
u8 num_logical_accel;
u8 num_engines;
u32 num_hb_ctrs;
+ u8 num_rps;
};
/* CSR write macro */
@@ -303,6 +315,7 @@ struct adf_hw_device_data {
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
+#define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
struct adf_admin_comms;
@@ -351,6 +364,7 @@ struct adf_accel_dev {
struct adf_cfg_device_data *cfg;
struct adf_fw_loader_data *fw_loader;
struct adf_admin_comms *admin;
+ struct adf_telemetry *telemetry;
struct adf_dc_data *dc_data;
struct adf_pm power_management;
struct list_head crypto_list;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
index 6be064dc64c8..4b5d0350fc2e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
@@ -19,7 +19,7 @@ static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,
int i;
loader = loader_data->fw_loader;
- num_objs = hw_device->uof_get_num_objs();
+ num_objs = hw_device->uof_get_num_objs(accel_dev);
for (i = 0; i < num_objs; i++) {
obj_name = hw_device->uof_get_name(accel_dev, i);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index 54b673ec2362..acad526eb741 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -498,6 +498,43 @@ int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt,
return ret;
}
+int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev,
+ dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes,
+ struct icp_qat_fw_init_admin_slice_cnt *slice_count)
+{
+ u32 ae_mask = GET_HW_DATA(accel_dev)->admin_ae_mask;
+ struct icp_qat_fw_init_admin_resp resp = { };
+ struct icp_qat_fw_init_admin_req req = { };
+ int ret;
+
+ req.cmd_id = ICP_QAT_FW_TL_START;
+ req.init_cfg_ptr = tl_dma_addr;
+ req.init_cfg_sz = layout_sz;
+
+ if (rp_indexes)
+ memcpy(&req.rp_indexes, rp_indexes, sizeof(req.rp_indexes));
+
+ ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
+ if (ret)
+ return ret;
+
+ memcpy(slice_count, &resp.slices, sizeof(*slice_count));
+
+ return 0;
+}
+
+int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct icp_qat_fw_init_admin_resp resp = { };
+ struct icp_qat_fw_init_admin_req req = { };
+ u32 ae_mask = hw_data->admin_ae_mask;
+
+ req.cmd_id = ICP_QAT_FW_TL_STOP;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
struct adf_admin_comms *admin;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h b/drivers/crypto/intel/qat/qat_common/adf_admin.h
index 55cbcbc66c9f..647c8e196752 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h
@@ -23,5 +23,9 @@ int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id,
int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp);
int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size);
int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err);
+int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev,
+ dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes,
+ struct icp_qat_fw_init_admin_slice_cnt *slice_count);
+int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
index 6e5de1dab97b..89df3888d7ea 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
@@ -47,6 +47,7 @@ enum adf_device_type {
DEV_C3XXX,
DEV_C3XXXVF,
DEV_4XXX,
+ DEV_420XX,
};
struct adf_dev_status_info {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
index 8e13fe938959..268052294468 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
@@ -2,6 +2,9 @@
/* Copyright(c) 2023 Intel Corporation */
#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include "adf_cfg.h"
#include "adf_cfg_services.h"
#include "adf_cfg_strings.h"
@@ -18,3 +21,27 @@ const char *const adf_cfg_services[] = {
[SVC_SYM_DC] = ADF_CFG_SYM_DC,
};
EXPORT_SYMBOL_GPL(adf_cfg_services);
+
+int adf_get_service_enabled(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ ADF_SERVICES_ENABLED " param not found\n");
+ return ret;
+ }
+
+ ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services),
+ services);
+ if (ret < 0)
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
+ services);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_get_service_enabled);
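A hedged usage sketch for the new helper: it returns an enum adf_services index on success or a negative errno, so callers can branch on the result directly. The function below is hypothetical, for illustration only:

static int example_handle_service(struct adf_accel_dev *accel_dev)
{
	int srv = adf_get_service_enabled(accel_dev);

	if (srv < 0)
		return srv;	/* key missing or value not in adf_cfg_services */

	switch (srv) {
	case SVC_DC:
	case SVC_DCC:
		/* compression-specific setup would go here */
		return 0;
	default:
		return 0;
	}
}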
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
index f78fd697b4be..c6b0328b0f5b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
@@ -5,6 +5,8 @@
#include "adf_cfg_strings.h"
+struct adf_accel_dev;
+
enum adf_services {
SVC_CY = 0,
SVC_CY2,
@@ -21,4 +23,6 @@ enum adf_services {
extern const char *const adf_cfg_services[SVC_COUNT];
+int adf_get_service_enabled(struct adf_accel_dev *accel_dev);
+
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
index 477efcc81a16..c42f5c25aabd 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
@@ -10,6 +10,7 @@
#include "adf_fw_counters.h"
#include "adf_heartbeat_dbgfs.h"
#include "adf_pm_dbgfs.h"
+#include "adf_tl_debugfs.h"
/**
* adf_dbgfs_init() - add persistent debugfs entries
@@ -66,6 +67,7 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
adf_heartbeat_dbgfs_add(accel_dev);
adf_pm_dbgfs_add(accel_dev);
adf_cnv_dbgfs_add(accel_dev);
+ adf_tl_dbgfs_add(accel_dev);
}
}
@@ -79,6 +81,7 @@ void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
return;
if (!accel_dev->is_vf) {
+ adf_tl_dbgfs_rm(accel_dev);
adf_cnv_dbgfs_rm(accel_dev);
adf_pm_dbgfs_rm(accel_dev);
adf_heartbeat_dbgfs_rm(accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
new file mode 100644
index 000000000000..4f86696800c9
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_FW_CONFIG_H_
+#define ADF_FW_CONFIG_H_
+
+enum adf_fw_objs {
+ ADF_FW_SYM_OBJ,
+ ADF_FW_ASYM_OBJ,
+ ADF_FW_DC_OBJ,
+ ADF_FW_ADMIN_OBJ,
+};
+
+struct adf_fw_config {
+ u32 ae_mask;
+ enum adf_fw_objs obj;
+};
+
+#endif
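The new header only carries the shared types; the per-device firmware tables live in the 4xxx/420xx hw_data files. A hypothetical table showing the intended use (the ae_mask values below are invented for illustration and do not match any real device):

static const struct adf_fw_config example_fw_config[] = {
	{ 0x1,   ADF_FW_DC_OBJ },	/* engine 0 runs the compression object */
	{ 0x6,   ADF_FW_SYM_OBJ },	/* engines 1-2 run the symmetric crypto object */
	{ 0x100, ADF_FW_ADMIN_OBJ },	/* a dedicated admin engine */
};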
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
new file mode 100644
index 000000000000..fe1f3d727dc5
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_cfg_services.h"
+#include "adf_cfg_strings.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_config.h"
+#include "adf_heartbeat.h"
+#include "adf_transport_access_macros.h"
+#include "qat_compression.h"
+#include "qat_crypto.h"
+
+static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ int banks = GET_MAX_BANKS(accel_dev);
+ int cpus = num_online_cpus();
+ unsigned long bank, val;
+ int instances;
+ int ret;
+ int i;
+
+ if (adf_hw_dev_has_crypto(accel_dev))
+ instances = min(cpus, banks / 2);
+ else
+ instances = 0;
+
+ for (i = 0; i < instances; i++) {
+ val = i;
+ bank = i * 2;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &bank, ADF_DEC);
+ if (ret)
+ goto err;
+
+ bank += 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &bank, ADF_DEC);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+ i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+ val = 128;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+ }
+
+ val = i;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
+ return ret;
+}
+
+static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ int banks = GET_MAX_BANKS(accel_dev);
+ int cpus = num_online_cpus();
+ unsigned long val;
+ int instances;
+ int ret;
+ int i;
+
+ if (adf_hw_dev_has_compression(accel_dev))
+ instances = min(cpus, banks);
+ else
+ instances = 0;
+
+ for (i = 0; i < instances; i++) {
+ val = i;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 1;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+ }
+
+ val = i;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
+ return ret;
+}
+
+static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
+{
+ unsigned long val;
+ int ret;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ return ret;
+
+ return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+}
+
+/**
+ * adf_gen4_dev_config() - create dev config required to create instances
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates the device configuration required to create instances.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+ if (ret)
+ goto err;
+
+ ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+ if (ret)
+ goto err;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+ if (ret)
+ goto err;
+
+ ret = sysfs_match_string(adf_cfg_services, services);
+ if (ret < 0)
+ goto err;
+
+ switch (ret) {
+ case SVC_CY:
+ case SVC_CY2:
+ ret = adf_crypto_dev_config(accel_dev);
+ break;
+ case SVC_DC:
+ case SVC_DCC:
+ ret = adf_comp_dev_config(accel_dev);
+ break;
+ default:
+ ret = adf_no_dev_config(accel_dev);
+ break;
+ }
+
+ if (ret)
+ goto err;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ return ret;
+
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_dev_config);
+
+int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev)
+{
+ const char *config;
+ int ret;
+
+ config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ return ret;
+
+ /* Default configuration is crypto only for even devices
+ * and compression for odd devices
+ */
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, config,
+ ADF_STR);
+ if (ret)
+ return ret;
+
+ adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_cfg_dev_init);
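Presumably a gen4 device driver wires the exported helper in as its dev_config hook, so that instances are created from the configuration built here; a minimal sketch under that assumption:

static void example_set_dev_config(struct adf_hw_device_data *hw_data)
{
	/* Invoked by the init path once the device is being configured. */
	hw_data->dev_config = adf_gen4_dev_config;
}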
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
new file mode 100644
index 000000000000..bb87655f69a8
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_GEN4_CONFIG_H_
+#define ADF_GEN4_CONFIG_H_
+
+#include "adf_accel_devices.h"
+
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
+int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 3148a62938fd..9985683056d5 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -2,8 +2,10 @@
/* Copyright(c) 2020 Intel Corporation */
#include <linux/iopoll.h>
#include "adf_accel_devices.h"
+#include "adf_cfg_services.h"
#include "adf_common_drv.h"
#include "adf_gen4_hw_data.h"
+#include "adf_gen4_pm.h"
static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
{
@@ -102,6 +104,131 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
}
EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
+u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_ACCELERATORS_MASK;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_accel_mask);
+
+u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_MAX_ACCELERATORS;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_num_accels);
+
+u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self)
+{
+ if (!self || !self->ae_mask)
+ return 0;
+
+ return hweight32(self->ae_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_num_aes);
+
+u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_PMISC_BAR;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_misc_bar_id);
+
+u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_ETR_BAR;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_etr_bar_id);
+
+u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_SRAM_BAR;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_sram_bar_id);
+
+enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self)
+{
+ return DEV_SKU_1;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_sku);
+
+void adf_gen4_get_arb_info(struct arb_info *arb_info)
+{
+ arb_info->arb_cfg = ADF_GEN4_ARB_CONFIG;
+ arb_info->arb_offset = ADF_GEN4_ARB_OFFSET;
+ arb_info->wt2sam_offset = ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_arb_info);
+
+void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info)
+{
+ admin_csrs_info->mailbox_offset = ADF_GEN4_MAILBOX_BASE_OFFSET;
+ admin_csrs_info->admin_msg_ur = ADF_GEN4_ADMINMSGUR_OFFSET;
+ admin_csrs_info->admin_msg_lr = ADF_GEN4_ADMINMSGLR_OFFSET;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_admin_info);
+
+u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self)
+{
+	/* GEN4 uses KPT counter for HB */
+ return ADF_GEN4_KPT_COUNTER_FREQ;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_heartbeat_clock);
+
+void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+ struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR];
+ void __iomem *csr = misc_bar->virt_addr;
+
+ /* Enable all in errsou3 except VFLR notification on host */
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_enable_error_correction);
+
+void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr;
+
+ addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
+
+ /* Enable bundle interrupts */
+ ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET, 0);
+ ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET, 0);
+
+ /* Enable misc interrupts */
+ ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_MASK_OFFSET, 0);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_enable_ints);
+
+int adf_gen4_init_device(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr;
+ u32 status;
+ u32 csr;
+ int ret;
+
+ addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
+
+ /* Temporarily mask PM interrupt */
+ csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
+ csr |= ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
+
+ /* Set DRV_ACTIVE bit to power up the device */
+ ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
+
+ /* Poll status register to make sure the device is powered up */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_GEN4_PM_INIT_STATE,
+ ADF_GEN4_PM_POLL_DELAY_US,
+ ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
+ ADF_GEN4_PM_STATUS);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_device);
+
static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
u32 *lower)
{
@@ -135,6 +262,28 @@ void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
+/*
+ * The vector routing table is used to select the MSI-X entry to use for each
+ * interrupt source.
+ * The first ADF_GEN4_ETR_MAX_BANKS entries correspond to ring interrupts.
+ * The final entry corresponds to VF2PF or error interrupts.
+ * This vector table could be used to configure one MSI-X entry to be shared
+ * between multiple interrupt sources.
+ *
+ * The default routing is set to have a one to one correspondence between the
+ * interrupt source and the MSI-X entry used.
+ */
+void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr;
+ int i;
+
+ csr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
+ for (i = 0; i <= ADF_GEN4_ETR_MAX_BANKS; i++)
+ ADF_CSR_WR(csr, ADF_GEN4_MSIX_RTTABLE_OFFSET(i), i);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_set_msix_default_rttable);
+
int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
{
return 0;
@@ -192,3 +341,92 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
+
+static const u32 thrd_to_arb_map_dcc[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0
+};
+
+static const u16 rp_group_to_arb_mask[] = {
+ [RP_GROUP_0] = 0x5,
+ [RP_GROUP_1] = 0xA,
+};
+
+static bool is_single_service(int service_id)
+{
+ switch (service_id) {
+ case SVC_DC:
+ case SVC_SYM:
+ case SVC_ASYM:
+ return true;
+ case SVC_CY:
+ case SVC_CY2:
+ case SVC_DCC:
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ default:
+ return false;
+ }
+}
+
+int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ u32 *thd2arb_map = hw_data->thd_to_arb_map;
+ unsigned int ae_cnt, worker_obj_cnt, i, j;
+ unsigned long ae_mask, thds_mask;
+ int srv_id, rp_group;
+ u32 thd2arb_map_base;
+ u16 arb_mask;
+
+ if (!hw_data->get_rp_group || !hw_data->get_ena_thd_mask ||
+ !hw_data->get_num_aes || !hw_data->uof_get_num_objs ||
+ !hw_data->uof_get_ae_mask)
+ return -EFAULT;
+
+ srv_id = adf_get_service_enabled(accel_dev);
+ if (srv_id < 0)
+ return srv_id;
+
+ ae_cnt = hw_data->get_num_aes(hw_data);
+ worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
+ ADF_GEN4_ADMIN_ACCELENGINES;
+
+ if (srv_id == SVC_DCC) {
+ memcpy(thd2arb_map, thrd_to_arb_map_dcc,
+ array_size(sizeof(*thd2arb_map), ae_cnt));
+ return 0;
+ }
+
+ for (i = 0; i < worker_obj_cnt; i++) {
+ ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
+ rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
+ thds_mask = hw_data->get_ena_thd_mask(accel_dev, i);
+ thd2arb_map_base = 0;
+
+ if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
+ return -EINVAL;
+
+ if (thds_mask == ADF_GEN4_ENA_THD_MASK_ERROR)
+ return -EINVAL;
+
+ if (is_single_service(srv_id))
+ arb_mask = rp_group_to_arb_mask[RP_GROUP_0] |
+ rp_group_to_arb_mask[RP_GROUP_1];
+ else
+ arb_mask = rp_group_to_arb_mask[rp_group];
+
+ for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE)
+ thd2arb_map_base |= arb_mask << (j * 4);
+
+ for_each_set_bit(j, &ae_mask, ae_cnt)
+ thd2arb_map[j] = thd2arb_map_base;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map);
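A worked example of the mapping arithmetic above (values illustrative):

/*
 * With threads 0 and 1 enabled on an AE serving ring-pair group 1,
 * arb_mask = rp_group_to_arb_mask[RP_GROUP_1] = 0xA, so
 *
 *	thd2arb_map_base = (0xA << 0) | (0xA << 4) = 0xAA
 *
 * For a single-service configuration both group masks are merged first,
 * 0x5 | 0xA = 0xF, and every enabled thread nibble becomes 0xF.
 */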
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 1813fe1d5a06..7d8a774cadc8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -3,9 +3,57 @@
#ifndef ADF_GEN4_HW_CSR_DATA_H_
#define ADF_GEN4_HW_CSR_DATA_H_
+#include <linux/units.h>
+
#include "adf_accel_devices.h"
#include "adf_cfg_common.h"
+/* PCIe configuration space */
+#define ADF_GEN4_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
+#define ADF_GEN4_SRAM_BAR 0
+#define ADF_GEN4_PMISC_BAR 1
+#define ADF_GEN4_ETR_BAR 2
+
+/* Clocks frequency */
+#define ADF_GEN4_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ)
+
+/* Physical function fuses */
+#define ADF_GEN4_FUSECTL0_OFFSET 0x2C8
+#define ADF_GEN4_FUSECTL1_OFFSET 0x2CC
+#define ADF_GEN4_FUSECTL2_OFFSET 0x2D0
+#define ADF_GEN4_FUSECTL3_OFFSET 0x2D4
+#define ADF_GEN4_FUSECTL4_OFFSET 0x2D8
+#define ADF_GEN4_FUSECTL5_OFFSET 0x2DC
+
+/* Accelerators */
+#define ADF_GEN4_ACCELERATORS_MASK 0x1
+#define ADF_GEN4_MAX_ACCELERATORS 1
+#define ADF_GEN4_ADMIN_ACCELENGINES 1
+
+/* MSIX interrupt */
+#define ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET 0x41A040
+#define ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET 0x41A044
+#define ADF_GEN4_SMIAPF_MASK_OFFSET 0x41A084
+#define ADF_GEN4_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04))
+
+/* Bank and ring configuration */
+#define ADF_GEN4_MAX_RPS 64
+#define ADF_GEN4_NUM_RINGS_PER_BANK 2
+#define ADF_GEN4_NUM_BANKS_PER_VF 4
+#define ADF_GEN4_ETR_MAX_BANKS 64
+#define ADF_GEN4_RX_RINGS_OFFSET 1
+#define ADF_GEN4_TX_RINGS_MASK 0x1
+
+/* Arbiter configuration */
+#define ADF_GEN4_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
+#define ADF_GEN4_ARB_OFFSET 0x0
+#define ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET 0x400
+
+/* Admin Interface Reg Offset */
+#define ADF_GEN4_ADMINMSGUR_OFFSET 0x500574
+#define ADF_GEN4_ADMINMSGLR_OFFSET 0x500578
+#define ADF_GEN4_MAILBOX_BASE_OFFSET 0x600970
+
/* Transport access */
#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
#define ADF_RING_CSR_RING_CONFIG 0x1000
@@ -146,7 +194,46 @@ do { \
#define ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800
#define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804
+/* Arbiter threads mask with error value */
+#define ADF_GEN4_ENA_THD_MASK_ERROR GENMASK(ADF_NUM_THREADS_PER_AE, 0)
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+
+enum icp_qat_gen4_slice_mask {
+ ICP_ACCEL_GEN4_MASK_CIPHER_SLICE = BIT(0),
+ ICP_ACCEL_GEN4_MASK_AUTH_SLICE = BIT(1),
+ ICP_ACCEL_GEN4_MASK_PKE_SLICE = BIT(2),
+ ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE = BIT(3),
+ ICP_ACCEL_GEN4_MASK_UCS_SLICE = BIT(4),
+ ICP_ACCEL_GEN4_MASK_EIA3_SLICE = BIT(5),
+ ICP_ACCEL_GEN4_MASK_SMX_SLICE = BIT(7),
+ ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE = BIT(8),
+ ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE = BIT(9),
+};
+
+enum adf_gen4_rp_groups {
+ RP_GROUP_0,
+ RP_GROUP_1,
+ RP_GROUP_COUNT
+};
+
+void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev);
+void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev);
+u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self);
+void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info);
+void adf_gen4_get_arb_info(struct arb_info *arb_info);
+u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self);
+u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self);
+u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self);
+u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self);
+u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self);
+enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self);
+u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self);
+int adf_gen4_init_device(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
+void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev);
+int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev);
+
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
new file mode 100644
index 000000000000..7fc7a77f6aed
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Intel Corporation. */
+#include <linux/export.h>
+#include <linux/kernel.h>
+
+#include "adf_gen4_tl.h"
+#include "adf_telemetry.h"
+#include "adf_tl_debugfs.h"
+
+#define ADF_GEN4_TL_DEV_REG_OFF(reg) ADF_TL_DEV_REG_OFF(reg, gen4)
+
+#define ADF_GEN4_TL_RP_REG_OFF(reg) ADF_TL_RP_REG_OFF(reg, gen4)
+
+#define ADF_GEN4_TL_SL_UTIL_COUNTER(_name) \
+ ADF_TL_COUNTER("util_" #_name, \
+ ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_util, gen4))
+
+#define ADF_GEN4_TL_SL_EXEC_COUNTER(_name) \
+ ADF_TL_COUNTER("exec_" #_name, \
+ ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_exec_cnt, gen4))
+
+/* Device level counters. */
+static const struct adf_tl_dbg_counter dev_counters[] = {
+ /* PCIe partial transactions. */
+ ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_pci_trans_cnt)),
+ /* Max read latency[ns]. */
+ ADF_TL_COUNTER(MAX_RD_LAT_NAME, ADF_TL_COUNTER_NS,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_lat_max)),
+ /* Read latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(RD_LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_lat_acc),
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_cmpl_cnt)),
+ /* Max get to put latency[ns]. */
+ ADF_TL_COUNTER(MAX_LAT_NAME, ADF_TL_COUNTER_NS,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_gp_lat_max)),
+ /* Get to put latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_gp_lat_acc),
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_ae_put_cnt)),
+ /* PCIe write bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_bw_in)),
+ /* PCIe read bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_bw_out)),
+ /* Page request latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(PAGE_REQ_LAT_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_page_req_lat_acc),
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_page_req_cnt)),
+ /* Page translation latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(AT_TRANS_LAT_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_trans_lat_acc),
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_trans_lat_cnt)),
+ /* Maximum uTLB used. */
+ ADF_TL_COUNTER(AT_MAX_UTLB_USED_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_max_tlb_used)),
+};
+
+/* Slice utilization counters. */
+static const struct adf_tl_dbg_counter sl_util_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(cpr),
+ /* Translator slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(xlt),
+ /* Decompression slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(dcpr),
+ /* PKE utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(pke),
+ /* Wireless Authentication slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(wat),
+ /* Wireless Cipher slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(wcp),
+ /* UCS slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(ucs),
+ /* Cipher slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(cph),
+ /* Authentication slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(ath),
+};
+
+/* Slice execution counters. */
+static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(cpr),
+ /* Translator slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(xlt),
+ /* Decompression slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(dcpr),
+ /* PKE execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(pke),
+ /* Wireless Authentication slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(wat),
+ /* Wireless Cipher slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(wcp),
+ /* UCS slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(ucs),
+ /* Cipher slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(cph),
+ /* Authentication slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(ath),
+};
+
+/* Ring pair counters. */
+static const struct adf_tl_dbg_counter rp_counters[] = {
+ /* PCIe partial transactions. */
+ ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_pci_trans_cnt)),
+ /* Get to put latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_gp_lat_acc),
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_ae_put_cnt)),
+ /* PCIe write bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_bw_in)),
+ /* PCIe read bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_bw_out)),
+ /* Message descriptor DevTLB hit rate. */
+ ADF_TL_COUNTER(AT_GLOB_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_hit)),
+ /* Message descriptor DevTLB miss rate. */
+ ADF_TL_COUNTER(AT_GLOB_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_miss)),
+ /* Payload DevTLB hit rate. */
+ ADF_TL_COUNTER(AT_PAYLD_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_hit)),
+ /* Payload DevTLB miss rate. */
+ ADF_TL_COUNTER(AT_PAYLD_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_miss)),
+};
+
+void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data)
+{
+ tl_data->layout_sz = ADF_GEN4_TL_LAYOUT_SZ;
+ tl_data->slice_reg_sz = ADF_GEN4_TL_SLICE_REG_SZ;
+ tl_data->rp_reg_sz = ADF_GEN4_TL_RP_REG_SZ;
+ tl_data->num_hbuff = ADF_GEN4_TL_NUM_HIST_BUFFS;
+ tl_data->max_rp = ADF_GEN4_TL_MAX_RP_NUM;
+ tl_data->msg_cnt_off = ADF_GEN4_TL_MSG_CNT_OFF;
+ tl_data->cpp_ns_per_cycle = ADF_GEN4_CPP_NS_PER_CYCLE;
+ tl_data->bw_units_to_bytes = ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES;
+
+ tl_data->dev_counters = dev_counters;
+ tl_data->num_dev_counters = ARRAY_SIZE(dev_counters);
+ tl_data->sl_util_counters = sl_util_counters;
+ tl_data->sl_exec_counters = sl_exec_counters;
+ tl_data->rp_counters = rp_counters;
+ tl_data->num_rp_counters = ARRAY_SIZE(rp_counters);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data);
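The GET_TL_DATA() macro added earlier resolves to hw_data->tl_data, so the expected wiring, an assumption based on the hooks in this patch, is for a device driver to populate its telemetry parameters at hw_data init time:

static void example_init_telemetry(struct adf_hw_device_data *hw_data)
{
	/* Fill in the gen4 layout sizes, counter tables and limits. */
	adf_gen4_init_tl_data(&hw_data->tl_data);
}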
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h
new file mode 100644
index 000000000000..32df4163beb9
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2023 Intel Corporation. */
+#ifndef ADF_GEN4_TL_H
+#define ADF_GEN4_TL_H
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct adf_tl_hw_data;
+
+/* Computation constants. */
+#define ADF_GEN4_CPP_NS_PER_CYCLE 2
+#define ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES 64
+
+/* Maximum aggregation time. Value in milliseconds. */
+#define ADF_GEN4_TL_MAX_AGGR_TIME_MS 4000
+/* Num of buffers to store historic values. */
+#define ADF_GEN4_TL_NUM_HIST_BUFFS \
+ (ADF_GEN4_TL_MAX_AGGR_TIME_MS / ADF_TL_DATA_WR_INTERVAL_MS)
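+/* With the defaults above this evaluates to 4000 / 1000 = 4 buffers. */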
+
+/* Max number of HW resources of one type. */
+#define ADF_GEN4_TL_MAX_SLICES_PER_TYPE 24
+
+/* Max number of simultaneously monitored ring pairs. */
+#define ADF_GEN4_TL_MAX_RP_NUM 4
+
+/**
+ * struct adf_gen4_tl_slice_data_regs - HW slice data as populated by FW.
+ * @reg_tm_slice_exec_cnt: Slice execution count.
+ * @reg_tm_slice_util: Slice utilization.
+ */
+struct adf_gen4_tl_slice_data_regs {
+ __u32 reg_tm_slice_exec_cnt;
+ __u32 reg_tm_slice_util;
+};
+
+#define ADF_GEN4_TL_SLICE_REG_SZ sizeof(struct adf_gen4_tl_slice_data_regs)
+
+/**
+ * struct adf_gen4_tl_device_data_regs - This structure stores device telemetry
+ * counter values as they are periodically populated by the device.
+ * @reg_tl_rd_lat_acc: read latency accumulator
+ * @reg_tl_gp_lat_acc: get-put latency accumulator
+ * @reg_tl_at_page_req_lat_acc: AT/DevTLB page request latency accumulator
+ * @reg_tl_at_trans_lat_acc: DevTLB transaction latency accumulator
+ * @reg_tl_re_acc: accumulated ring empty time
+ * @reg_tl_pci_trans_cnt: PCIe partial transactions
+ * @reg_tl_rd_lat_max: maximum logged read latency
+ * @reg_tl_rd_cmpl_cnt: read requests completed count
+ * @reg_tl_gp_lat_max: maximum logged get to put latency
+ * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings
+ * @reg_tl_bw_in: PCIe write bandwidth
+ * @reg_tl_bw_out: PCIe read bandwidth
+ * @reg_tl_at_page_req_cnt: DevTLB page requests count
+ * @reg_tl_at_trans_lat_cnt: DevTLB transaction latency samples count
+ * @reg_tl_at_max_tlb_used: maximum uTLB used
+ * @reg_tl_re_cnt: ring empty time samples count
+ * @reserved: reserved
+ * @ath_slices: array of Authentication slices utilization registers
+ * @cph_slices: array of Cipher slices utilization registers
+ * @cpr_slices: array of Compression slices utilization registers
+ * @xlt_slices: array of Translator slices utilization registers
+ * @dcpr_slices: array of Decompression slices utilization registers
+ * @pke_slices: array of PKE slices utilization registers
+ * @ucs_slices: array of UCS slices utilization registers
+ * @wat_slices: array of Wireless Authentication slices utilization registers
+ * @wcp_slices: array of Wireless Cipher slices utilization registers
+ */
+struct adf_gen4_tl_device_data_regs {
+ __u64 reg_tl_rd_lat_acc;
+ __u64 reg_tl_gp_lat_acc;
+ __u64 reg_tl_at_page_req_lat_acc;
+ __u64 reg_tl_at_trans_lat_acc;
+ __u64 reg_tl_re_acc;
+ __u32 reg_tl_pci_trans_cnt;
+ __u32 reg_tl_rd_lat_max;
+ __u32 reg_tl_rd_cmpl_cnt;
+ __u32 reg_tl_gp_lat_max;
+ __u32 reg_tl_ae_put_cnt;
+ __u32 reg_tl_bw_in;
+ __u32 reg_tl_bw_out;
+ __u32 reg_tl_at_page_req_cnt;
+ __u32 reg_tl_at_trans_lat_cnt;
+ __u32 reg_tl_at_max_tlb_used;
+ __u32 reg_tl_re_cnt;
+ __u32 reserved;
+ struct adf_gen4_tl_slice_data_regs ath_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs cph_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs cpr_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs xlt_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs dcpr_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs pke_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs ucs_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs wat_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs wcp_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+};
+
+/**
+ * struct adf_gen4_tl_ring_pair_data_regs - This structure stores Ring Pair
+ * telemetry counter values as they are periodically populated by the device.
+ * @reg_tl_gp_lat_acc: get-put latency accumulator
+ * @reserved: reserved
+ * @reg_tl_pci_trans_cnt: PCIe partial transactions
+ * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings
+ * @reg_tl_bw_in: PCIe write bandwidth
+ * @reg_tl_bw_out: PCIe read bandwidth
+ * @reg_tl_at_glob_devtlb_hit: Message descriptor DevTLB hit rate
+ * @reg_tl_at_glob_devtlb_miss: Message descriptor DevTLB miss rate
+ * @reg_tl_at_payld_devtlb_hit: Payload DevTLB hit rate
+ * @reg_tl_at_payld_devtlb_miss: Payload DevTLB miss rate
+ * @reg_tl_re_cnt: ring empty time samples count
+ * @reserved1: reserved
+ */
+struct adf_gen4_tl_ring_pair_data_regs {
+ __u64 reg_tl_gp_lat_acc;
+ __u64 reserved;
+ __u32 reg_tl_pci_trans_cnt;
+ __u32 reg_tl_ae_put_cnt;
+ __u32 reg_tl_bw_in;
+ __u32 reg_tl_bw_out;
+ __u32 reg_tl_at_glob_devtlb_hit;
+ __u32 reg_tl_at_glob_devtlb_miss;
+ __u32 reg_tl_at_payld_devtlb_hit;
+ __u32 reg_tl_at_payld_devtlb_miss;
+ __u32 reg_tl_re_cnt;
+ __u32 reserved1;
+};
+
+#define ADF_GEN4_TL_RP_REG_SZ sizeof(struct adf_gen4_tl_ring_pair_data_regs)
+
+/**
+ * struct adf_gen4_tl_layout - This structure represents the entire telemetry
+ * counter data set (device plus four ring pairs) as it is periodically
+ * populated by the device.
+ * @tl_device_data_regs: structure of device telemetry registers
+ * @tl_ring_pairs_data_regs: array of ring pairs telemetry registers
+ * @reg_tl_msg_cnt: telemetry messages counter
+ * @reserved: reserved
+ */
+struct adf_gen4_tl_layout {
+ struct adf_gen4_tl_device_data_regs tl_device_data_regs;
+ struct adf_gen4_tl_ring_pair_data_regs
+ tl_ring_pairs_data_regs[ADF_GEN4_TL_MAX_RP_NUM];
+ __u32 reg_tl_msg_cnt;
+ __u32 reserved;
+};
+
+#define ADF_GEN4_TL_LAYOUT_SZ sizeof(struct adf_gen4_tl_layout)
+#define ADF_GEN4_TL_MSG_CNT_OFF offsetof(struct adf_gen4_tl_layout, reg_tl_msg_cnt)
+
+#ifdef CONFIG_DEBUG_FS
+void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data);
+#else
+static inline void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* ADF_GEN4_TL_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index 81c39f3d07e1..f43ae9111553 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -11,6 +11,7 @@
#include "adf_heartbeat.h"
#include "adf_rl.h"
#include "adf_sysfs_ras_counters.h"
+#include "adf_telemetry.h"
static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);
@@ -142,6 +143,10 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev)
if (ret && ret != -EOPNOTSUPP)
return ret;
+ ret = adf_tl_init(accel_dev);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
/*
* Subservice initialisation is divided into two stages: init and start.
* This is to facilitate any ordering dependencies between services
@@ -220,6 +225,10 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
if (ret && ret != -EOPNOTSUPP)
return ret;
+ ret = adf_tl_start(accel_dev);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
list_for_each_entry(service, &service_table, list) {
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
dev_err(&GET_DEV(accel_dev),
@@ -279,6 +288,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
!test_bit(ADF_STATUS_STARTING, &accel_dev->status))
return;
+ adf_tl_stop(accel_dev);
adf_rl_stop(accel_dev);
adf_dbgfs_rm(accel_dev);
adf_sysfs_stop_ras(accel_dev);
@@ -374,6 +384,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
adf_heartbeat_shutdown(accel_dev);
+ adf_tl_shutdown(accel_dev);
+
hw_data->disable_iov(accel_dev);
if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
index 86e3e2152b1b..de1b214dba1f 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
@@ -812,17 +812,16 @@ static int add_update_sla(struct adf_accel_dev *accel_dev,
if (!sla_in) {
dev_warn(&GET_DEV(accel_dev),
"SLA input data pointer is missing\n");
- ret = -EFAULT;
- goto ret_err;
+ return -EFAULT;
}
+ mutex_lock(&rl_data->rl_lock);
+
/* Input validation */
ret = validate_user_input(accel_dev, sla_in, is_update);
if (ret)
goto ret_err;
- mutex_lock(&rl_data->rl_lock);
-
if (is_update) {
ret = validate_sla_id(accel_dev, sla_in->sla_id);
if (ret)
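Note on the hunk above: the early return avoids jumping to the ret_err unlock path before rl_lock is taken, and moving mutex_lock() ahead of validate_user_input() makes validation and the subsequent SLA update atomic with respect to concurrent SLA changes.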
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h
index eb5a330f8543..269c6656fb90 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h
@@ -79,6 +79,7 @@ struct adf_rl_interface_data {
struct adf_rl_sla_input_data input;
enum adf_base_services cap_rem_srv;
struct rw_semaphore lock;
+ bool sysfs_added;
};
struct adf_rl_hw_data {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index ddffc98119c6..d450dad32c9e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -215,6 +215,9 @@ static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr,
enum adf_cfg_service_type svc;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
hw_data = GET_HW_DATA(accel_dev);
if (accel_dev->sysfs.ring_num == UNSET_RING_NUM)
@@ -242,7 +245,8 @@ static ssize_t rp2srv_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct adf_accel_dev *accel_dev;
- int ring, num_rings, ret;
+ int num_rings, ret;
+ unsigned int ring;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
index cffe2d722995..e97c67c87b3c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
@@ -99,6 +99,8 @@ void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev)
if (device_add_group(&GET_DEV(accel_dev), &qat_ras_group))
dev_err(&GET_DEV(accel_dev),
"Failed to create qat_ras attribute group.\n");
+
+ accel_dev->ras_errors.sysfs_added = true;
}
void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev)
@@ -106,7 +108,10 @@ void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev)
if (!accel_dev->ras_errors.enabled)
return;
- device_remove_group(&GET_DEV(accel_dev), &qat_ras_group);
+ if (accel_dev->ras_errors.sysfs_added) {
+ device_remove_group(&GET_DEV(accel_dev), &qat_ras_group);
+ accel_dev->ras_errors.sysfs_added = false;
+ }
ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors);
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
index abf9c52474ec..bedb514d4e30 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
@@ -441,11 +441,19 @@ int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev)
data->cap_rem_srv = ADF_SVC_NONE;
data->input.srv = ADF_SVC_NONE;
+ data->sysfs_added = true;
return ret;
}
void adf_sysfs_rl_rm(struct adf_accel_dev *accel_dev)
{
+ struct adf_rl_interface_data *data;
+
+ data = &GET_RL_STRUCT(accel_dev);
+ if (!data->sysfs_added)
+ return;
+
device_remove_group(&GET_DEV(accel_dev), &qat_rl_group);
+ data->sysfs_added = false;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
new file mode 100644
index 000000000000..2ff714d11bd2
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Intel Corporation. */
+#define dev_fmt(fmt) "Telemetry: " fmt
+
+#include <asm/errno.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include "adf_admin.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_telemetry.h"
+
+#define TL_IS_ZERO(input) ((input) == 0)
+
+static bool is_tl_supported(struct adf_accel_dev *accel_dev)
+{
+ u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities;
+
+ return fw_caps & TL_CAPABILITY_BIT;
+}
+
+static int validate_tl_data(struct adf_tl_hw_data *tl_data)
+{
+ if (!tl_data->dev_counters ||
+ TL_IS_ZERO(tl_data->num_dev_counters) ||
+ !tl_data->sl_util_counters ||
+ !tl_data->sl_exec_counters ||
+ !tl_data->rp_counters ||
+ TL_IS_ZERO(tl_data->num_rp_counters))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct device *dev = &GET_DEV(accel_dev);
+ size_t regs_sz = tl_data->layout_sz;
+ struct adf_telemetry *telemetry;
+ int node = dev_to_node(dev);
+ void *tl_data_regs;
+ unsigned int i;
+
+ telemetry = kzalloc_node(sizeof(*telemetry), GFP_KERNEL, node);
+ if (!telemetry)
+ return -ENOMEM;
+
+ telemetry->rp_num_indexes = kmalloc_array(tl_data->max_rp,
+ sizeof(*telemetry->rp_num_indexes),
+ GFP_KERNEL);
+ if (!telemetry->rp_num_indexes)
+ goto err_free_tl;
+
+ telemetry->regs_hist_buff = kmalloc_array(tl_data->num_hbuff,
+ sizeof(*telemetry->regs_hist_buff),
+ GFP_KERNEL);
+ if (!telemetry->regs_hist_buff)
+ goto err_free_rp_indexes;
+
+ telemetry->regs_data = dma_alloc_coherent(dev, regs_sz,
+ &telemetry->regs_data_p,
+ GFP_KERNEL);
+ if (!telemetry->regs_data)
+ goto err_free_regs_hist_buff;
+
+ for (i = 0; i < tl_data->num_hbuff; i++) {
+ tl_data_regs = kzalloc_node(regs_sz, GFP_KERNEL, node);
+ if (!tl_data_regs)
+ goto err_free_dma;
+
+ telemetry->regs_hist_buff[i] = tl_data_regs;
+ }
+
+ accel_dev->telemetry = telemetry;
+
+ return 0;
+
+err_free_dma:
+ dma_free_coherent(dev, regs_sz, telemetry->regs_data,
+ telemetry->regs_data_p);
+
+ while (i--)
+ kfree(telemetry->regs_hist_buff[i]);
+
+err_free_regs_hist_buff:
+ kfree(telemetry->regs_hist_buff);
+err_free_rp_indexes:
+ kfree(telemetry->rp_num_indexes);
+err_free_tl:
+ kfree(telemetry);
+
+ return -ENOMEM;
+}
+
+static void adf_tl_free_mem(struct adf_accel_dev *accel_dev)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct device *dev = &GET_DEV(accel_dev);
+ size_t regs_sz = tl_data->layout_sz;
+ unsigned int i;
+
+ for (i = 0; i < tl_data->num_hbuff; i++)
+ kfree(telemetry->regs_hist_buff[i]);
+
+ dma_free_coherent(dev, regs_sz, telemetry->regs_data,
+ telemetry->regs_data_p);
+
+ kfree(telemetry->regs_hist_buff);
+ kfree(telemetry->rp_num_indexes);
+ kfree(telemetry);
+ accel_dev->telemetry = NULL;
+}
+
+static unsigned long get_next_timeout(void)
+{
+ return msecs_to_jiffies(ADF_TL_TIMER_INT_MS);
+}
+
+static void snapshot_regs(struct adf_telemetry *telemetry, size_t size)
+{
+ void *dst = telemetry->regs_hist_buff[telemetry->hb_num];
+ void *src = telemetry->regs_data;
+
+ memcpy(dst, src, size);
+}
+
+static void tl_work_handler(struct work_struct *work)
+{
+ struct delayed_work *delayed_work;
+ struct adf_telemetry *telemetry;
+ struct adf_tl_hw_data *tl_data;
+ u32 msg_cnt, old_msg_cnt;
+ size_t layout_sz;
+ u32 *regs_data;
+ size_t id;
+
+ delayed_work = to_delayed_work(work);
+ telemetry = container_of(delayed_work, struct adf_telemetry, work_ctx);
+ tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ regs_data = telemetry->regs_data;
+
+ id = tl_data->msg_cnt_off / sizeof(*regs_data);
+ layout_sz = tl_data->layout_sz;
+
+ if (!atomic_read(&telemetry->state)) {
+ cancel_delayed_work_sync(&telemetry->work_ctx);
+ return;
+ }
+
+ msg_cnt = regs_data[id];
+ old_msg_cnt = msg_cnt;
+ if (msg_cnt == telemetry->msg_cnt)
+ goto out;
+
+ mutex_lock(&telemetry->regs_hist_lock);
+
+ snapshot_regs(telemetry, layout_sz);
+
+ /* Check if data changed while updating it */
+ msg_cnt = regs_data[id];
+ if (old_msg_cnt != msg_cnt)
+ snapshot_regs(telemetry, layout_sz);
+
+ telemetry->msg_cnt = msg_cnt;
+ telemetry->hb_num++;
+ telemetry->hb_num %= telemetry->hbuffs;
+
+ mutex_unlock(&telemetry->regs_hist_lock);
+
+out:
+ adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout());
+}
+
+int adf_tl_halt(struct adf_accel_dev *accel_dev)
+{
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct device *dev = &GET_DEV(accel_dev);
+ int ret;
+
+ cancel_delayed_work_sync(&telemetry->work_ctx);
+ atomic_set(&telemetry->state, 0);
+
+ ret = adf_send_admin_tl_stop(accel_dev);
+ if (ret)
+ dev_err(dev, "failed to stop telemetry\n");
+
+ return ret;
+}
+
+int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct device *dev = &GET_DEV(accel_dev);
+ size_t layout_sz = tl_data->layout_sz;
+ int ret;
+
+ ret = adf_send_admin_tl_start(accel_dev, telemetry->regs_data_p,
+ layout_sz, telemetry->rp_num_indexes,
+ &telemetry->slice_cnt);
+ if (ret) {
+ dev_err(dev, "failed to start telemetry\n");
+ return ret;
+ }
+
+ telemetry->hbuffs = state;
+ atomic_set(&telemetry->state, state);
+
+ adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout());
+
+ return 0;
+}
+
+int adf_tl_init(struct adf_accel_dev *accel_dev)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ u8 max_rp = GET_TL_DATA(accel_dev).max_rp;
+ struct device *dev = &GET_DEV(accel_dev);
+ struct adf_telemetry *telemetry;
+ unsigned int i;
+ int ret;
+
+ ret = validate_tl_data(tl_data);
+ if (ret)
+ return ret;
+
+ ret = adf_tl_alloc_mem(accel_dev);
+ if (ret) {
+ dev_err(dev, "failed to initialize: %d\n", ret);
+ return ret;
+ }
+
+ telemetry = accel_dev->telemetry;
+ telemetry->accel_dev = accel_dev;
+
+ mutex_init(&telemetry->wr_lock);
+ mutex_init(&telemetry->regs_hist_lock);
+ INIT_DELAYED_WORK(&telemetry->work_ctx, tl_work_handler);
+
+ for (i = 0; i < max_rp; i++)
+ telemetry->rp_num_indexes[i] = ADF_TL_RP_REGS_DISABLED;
+
+ return 0;
+}
+
+int adf_tl_start(struct adf_accel_dev *accel_dev)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+
+ if (!accel_dev->telemetry)
+ return -EOPNOTSUPP;
+
+ if (!is_tl_supported(accel_dev)) {
+ dev_info(dev, "feature not supported by FW\n");
+ adf_tl_free_mem(accel_dev);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+void adf_tl_stop(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->telemetry)
+ return;
+
+ if (atomic_read(&accel_dev->telemetry->state))
+ adf_tl_halt(accel_dev);
+}
+
+void adf_tl_shutdown(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->telemetry)
+ return;
+
+ adf_tl_free_mem(accel_dev);
+}
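Putting the new hooks together, the expected lifecycle (a sketch inferred from the adf_init.c changes above, not driver code) is init, start, then run/halt under debugfs control, and finally stop and shutdown:

static int example_tl_lifecycle(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_tl_init(accel_dev);	/* allocate DMA region and history buffers */
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	ret = adf_tl_start(accel_dev);	/* verify the FW capability bit */
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	/* ... debugfs writes trigger adf_tl_run() and adf_tl_halt() ... */

	adf_tl_stop(accel_dev);		/* halts telemetry if still running */
	adf_tl_shutdown(accel_dev);	/* frees all telemetry memory */
	return 0;
}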
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
new file mode 100644
index 000000000000..9be81cd3b886
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2023 Intel Corporation. */
+#ifndef ADF_TELEMETRY_H
+#define ADF_TELEMETRY_H
+
+#include <linux/bits.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "icp_qat_fw_init_admin.h"
+
+struct adf_accel_dev;
+struct adf_tl_dbg_counter;
+struct dentry;
+
+#define ADF_TL_SL_CNT_COUNT \
+ (sizeof(struct icp_qat_fw_init_admin_slice_cnt) / sizeof(__u8))
+
+#define TL_CAPABILITY_BIT BIT(1)
+/* Interval within which the device writes data to the DMA region. Value in milliseconds. */
+#define ADF_TL_DATA_WR_INTERVAL_MS 1000
+/* Interval within timer interrupt should be handled. Value in milliseconds. */
+#define ADF_TL_TIMER_INT_MS (ADF_TL_DATA_WR_INTERVAL_MS / 2)
+
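+/* Marks an unused ring pair slot in @rp_num_indexes. */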
+#define ADF_TL_RP_REGS_DISABLED (0xff)
+
+struct adf_tl_hw_data {
+ size_t layout_sz;
+ size_t slice_reg_sz;
+ size_t rp_reg_sz;
+ size_t msg_cnt_off;
+ const struct adf_tl_dbg_counter *dev_counters;
+ const struct adf_tl_dbg_counter *sl_util_counters;
+ const struct adf_tl_dbg_counter *sl_exec_counters;
+ const struct adf_tl_dbg_counter *rp_counters;
+ u8 num_hbuff;
+ u8 cpp_ns_per_cycle;
+ u8 bw_units_to_bytes;
+ u8 num_dev_counters;
+ u8 num_rp_counters;
+ u8 max_rp;
+};
+
+struct adf_telemetry {
+ struct adf_accel_dev *accel_dev;
+ atomic_t state;
+ u32 hbuffs;
+ int hb_num;
+ u32 msg_cnt;
+ dma_addr_t regs_data_p; /* bus address for DMA mapping */
+ void *regs_data; /* virtual address for DMA mapping */
+ /**
+ * @regs_hist_buff: array of pointers to copies of the last @hbuffs
+ * values of @regs_data
+ */
+ void **regs_hist_buff;
+ struct dentry *dbg_dir;
+ u8 *rp_num_indexes;
+	/**
+	 * @regs_hist_lock: protects against concurrent writes and reads of the
+	 * copies referenced by @regs_hist_buff
+	 */
+ struct mutex regs_hist_lock;
+	/**
+	 * @wr_lock: protects against concurrent writes to the telemetry
+	 * debugfs files
+	 */
+ struct mutex wr_lock;
+ struct delayed_work work_ctx;
+ struct icp_qat_fw_init_admin_slice_cnt slice_cnt;
+};
+
+#ifdef CONFIG_DEBUG_FS
+int adf_tl_init(struct adf_accel_dev *accel_dev);
+int adf_tl_start(struct adf_accel_dev *accel_dev);
+void adf_tl_stop(struct adf_accel_dev *accel_dev);
+void adf_tl_shutdown(struct adf_accel_dev *accel_dev);
+int adf_tl_run(struct adf_accel_dev *accel_dev, int state);
+int adf_tl_halt(struct adf_accel_dev *accel_dev);
+#else
+static inline int adf_tl_init(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline int adf_tl_start(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline void adf_tl_stop(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_tl_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* ADF_TELEMETRY_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
new file mode 100644
index 000000000000..c8241f5a0a26
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Intel Corporation. */
+#define dev_fmt(fmt) "Telemetry debugfs: " fmt
+
+#include <linux/atomic.h>
+#include <linux/debugfs.h>
+#include <linux/dev_printk.h>
+#include <linux/dcache.h>
+#include <linux/file.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/units.h>
+
+#include "adf_accel_devices.h"
+#include "adf_cfg_strings.h"
+#include "adf_telemetry.h"
+#include "adf_tl_debugfs.h"
+
+#define TL_VALUE_MIN_PADDING 20
+#define TL_KEY_MIN_PADDING 23
+#define TL_RP_SRV_UNKNOWN "Unknown"
+
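+/*
+ * Copy the most recent samples of one counter out of the history buffers,
+ * oldest first. Returns the number of samples collected.
+ */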
+static int tl_collect_values_u32(struct adf_telemetry *telemetry,
+ size_t counter_offset, u64 *arr)
+{
+ unsigned int samples, hb_idx, i;
+ u32 *regs_hist_buff;
+ u32 counter_val;
+
+ samples = min(telemetry->msg_cnt, telemetry->hbuffs);
+ hb_idx = telemetry->hb_num + telemetry->hbuffs - samples;
+
+ mutex_lock(&telemetry->regs_hist_lock);
+
+ for (i = 0; i < samples; i++) {
+ regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs];
+ counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)];
+ arr[i] = counter_val;
+ hb_idx++;
+ }
+
+ mutex_unlock(&telemetry->regs_hist_lock);
+
+ return samples;
+}
+
+static int tl_collect_values_u64(struct adf_telemetry *telemetry,
+ size_t counter_offset, u64 *arr)
+{
+ unsigned int samples, hb_idx, i;
+ u64 *regs_hist_buff;
+ u64 counter_val;
+
+ samples = min(telemetry->msg_cnt, telemetry->hbuffs);
+ hb_idx = telemetry->hb_num + telemetry->hbuffs - samples;
+
+ mutex_lock(&telemetry->regs_hist_lock);
+
+ for (i = 0; i < samples; i++) {
+ regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs];
+ counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)];
+ arr[i] = counter_val;
+ hb_idx++;
+ }
+
+ mutex_unlock(&telemetry->regs_hist_lock);
+
+ return samples;
+}
+
+/**
+ * avg_array() - Return the average of the values in an array.
+ * @array: Array of values.
+ * @len: Number of elements.
+ *
+ * The average is computed without overflowing the accumulator: each element's
+ * quotient and remainder with respect to @len are summed separately, and the
+ * remainder sum is carried into the quotient sum whenever it reaches @len.
+ *
+ * Return: average of values.
+ */
+#define avg_array(array, len) ( \
+{ \
+ typeof(&(array)[0]) _array = (array); \
+ __unqual_scalar_typeof(_array[0]) _x = 0; \
+ __unqual_scalar_typeof(_array[0]) _y = 0; \
+ __unqual_scalar_typeof(_array[0]) _a, _b; \
+ typeof(len) _len = (len); \
+ size_t _i; \
+ \
+ for (_i = 0; _i < _len; _i++) { \
+ _a = _array[_i]; \
+ _b = do_div(_a, _len); \
+ _x += _a; \
+ if (_y >= _len - _b) { \
+ _x++; \
+ _y -= _len - _b; \
+ } else { \
+ _y += _b; \
+ } \
+ } \
+ do_div(_y, _len); \
+ (_x + _y); \
+})
+
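+/* e.g. for {3, 4, 6}: quotient sum 1 + 1 + 2 = 4, remainder sum 0 + 1 + 0 < 3, result 4. */
+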
+/* Calculation function for simple counter. */
+static int tl_calc_count(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ u64 *hist_vals;
+ int sample_cnt;
+ int ret = 0;
+
+ hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals),
+ GFP_KERNEL);
+ if (!hist_vals)
+ return -ENOMEM;
+
+ memset(vals, 0, sizeof(*vals));
+ sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals);
+ if (!sample_cnt)
+ goto out_free_hist_vals;
+
+ vals->curr = hist_vals[sample_cnt - 1];
+ vals->min = min_array(hist_vals, sample_cnt);
+ vals->max = max_array(hist_vals, sample_cnt);
+ vals->avg = avg_array(hist_vals, sample_cnt);
+
+out_free_hist_vals:
+ kfree(hist_vals);
+ return ret;
+}
+
+/* Convert CPP bus cycles to ns. */
+static int tl_cycles_to_ns(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle;
+ int ret;
+
+ ret = tl_calc_count(telemetry, ctr, vals);
+ if (ret)
+ return ret;
+
+ vals->curr *= cpp_ns_per_cycle;
+ vals->min *= cpp_ns_per_cycle;
+ vals->max *= cpp_ns_per_cycle;
+ vals->avg *= cpp_ns_per_cycle;
+
+ return 0;
+}
+
+/*
+ * Compute the cumulative average latency by dividing each accumulated latency
+ * value by its corresponding count. The returned value is in ns.
+ */
+static int tl_lat_acc_avg(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle;
+ u8 num_hbuff = tl_data->num_hbuff;
+ int sample_cnt, i;
+ u64 *hist_vals;
+ u64 *hist_cnt;
+ int ret = 0;
+
+ hist_vals = kmalloc_array(num_hbuff, sizeof(*hist_vals), GFP_KERNEL);
+ if (!hist_vals)
+ return -ENOMEM;
+
+ hist_cnt = kmalloc_array(num_hbuff, sizeof(*hist_cnt), GFP_KERNEL);
+ if (!hist_cnt) {
+ ret = -ENOMEM;
+ goto out_free_hist_vals;
+ }
+
+ memset(vals, 0, sizeof(*vals));
+ sample_cnt = tl_collect_values_u64(telemetry, ctr->offset1, hist_vals);
+ if (!sample_cnt)
+ goto out_free_hist_cnt;
+
+ tl_collect_values_u32(telemetry, ctr->offset2, hist_cnt);
+
+ for (i = 0; i < sample_cnt; i++) {
+ /* Avoid division by 0 if count is 0. */
+ if (hist_cnt[i])
+ hist_vals[i] = div_u64(hist_vals[i] * cpp_ns_per_cycle,
+ hist_cnt[i]);
+ else
+ hist_vals[i] = 0;
+ }
+
+ vals->curr = hist_vals[sample_cnt - 1];
+ vals->min = min_array(hist_vals, sample_cnt);
+ vals->max = max_array(hist_vals, sample_cnt);
+ vals->avg = avg_array(hist_vals, sample_cnt);
+
+out_free_hist_cnt:
+ kfree(hist_cnt);
+out_free_hist_vals:
+ kfree(hist_vals);
+ return ret;
+}
+
+/* Convert HW raw bandwidth units to Mbps. */
+static int tl_bw_hw_units_to_mbps(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ u16 bw_hw_2_bits = tl_data->bw_units_to_bytes * BITS_PER_BYTE;
+ u64 *hist_vals;
+ int sample_cnt;
+ int ret = 0;
+
+ hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals),
+ GFP_KERNEL);
+ if (!hist_vals)
+ return -ENOMEM;
+
+ memset(vals, 0, sizeof(*vals));
+ sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals);
+ if (!sample_cnt)
+ goto out_free_hist_vals;
+
+ vals->curr = div_u64(hist_vals[sample_cnt - 1] * bw_hw_2_bits, MEGA);
+ vals->min = div_u64(min_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);
+ vals->max = div_u64(max_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);
+ vals->avg = div_u64(avg_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);
+
+out_free_hist_vals:
+ kfree(hist_vals);
+ return ret;
+}
+
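+/*
+ * Print one counter row: name and current value, plus min/max/avg when more
+ * than one history buffer is in use.
+ */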
+static void tl_seq_printf_counter(struct adf_telemetry *telemetry,
+ struct seq_file *s, const char *name,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, name);
+ seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->curr);
+ if (atomic_read(&telemetry->state) > 1) {
+ seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->min);
+ seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->max);
+ seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->avg);
+ }
+ seq_puts(s, "\n");
+}
+
+static int tl_calc_and_print_counter(struct adf_telemetry *telemetry,
+ struct seq_file *s,
+ const struct adf_tl_dbg_counter *ctr,
+ const char *name)
+{
+ const char *counter_name = name ? name : ctr->name;
+ enum adf_tl_counter_type type = ctr->type;
+ struct adf_tl_dbg_aggr_values vals;
+ int ret;
+
+ switch (type) {
+ case ADF_TL_SIMPLE_COUNT:
+ ret = tl_calc_count(telemetry, ctr, &vals);
+ break;
+ case ADF_TL_COUNTER_NS:
+ ret = tl_cycles_to_ns(telemetry, ctr, &vals);
+ break;
+ case ADF_TL_COUNTER_NS_AVG:
+ ret = tl_lat_acc_avg(telemetry, ctr, &vals);
+ break;
+ case ADF_TL_COUNTER_MBPS:
+ ret = tl_bw_hw_units_to_mbps(telemetry, ctr, &vals);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ tl_seq_printf_counter(telemetry, s, counter_name, &vals);
+
+ return 0;
+}
+
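+/* Print a per-slice counter, offset by cnt_id into the slice register block. */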
+static int tl_print_sl_counter(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct seq_file *s, u8 cnt_id)
+{
+ size_t sl_regs_sz = GET_TL_DATA(telemetry->accel_dev).slice_reg_sz;
+ struct adf_tl_dbg_counter slice_ctr;
+ size_t offset_inc = cnt_id * sl_regs_sz;
+ char cnt_name[MAX_COUNT_NAME_SIZE];
+
+ snprintf(cnt_name, MAX_COUNT_NAME_SIZE, "%s%d", ctr->name, cnt_id);
+ slice_ctr = *ctr;
+ slice_ctr.offset1 += offset_inc;
+
+ return tl_calc_and_print_counter(telemetry, s, &slice_ctr, cnt_name);
+}
+
+static int tl_calc_and_print_sl_counters(struct adf_accel_dev *accel_dev,
+ struct seq_file *s, u8 cnt_type, u8 cnt_id)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ const struct adf_tl_dbg_counter *sl_tl_util_counters;
+ const struct adf_tl_dbg_counter *sl_tl_exec_counters;
+ const struct adf_tl_dbg_counter *ctr;
+ int ret;
+
+ sl_tl_util_counters = tl_data->sl_util_counters;
+ sl_tl_exec_counters = tl_data->sl_exec_counters;
+
+ ctr = &sl_tl_util_counters[cnt_type];
+
+ ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "invalid slice utilization counter type\n");
+ return ret;
+ }
+
+ ctr = &sl_tl_exec_counters[cnt_type];
+
+ ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "invalid slice execution counter type\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void tl_print_msg_cnt(struct seq_file *s, u32 msg_cnt)
+{
+ seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, SNAPSHOT_CNT_MSG);
+ seq_printf(s, "%*u\n", TL_VALUE_MIN_PADDING, msg_cnt);
+}
+
+static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
+ struct seq_file *s)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ const struct adf_tl_dbg_counter *dev_tl_counters;
+ u8 num_dev_counters = tl_data->num_dev_counters;
+ u8 *sl_cnt = (u8 *)&telemetry->slice_cnt;
+ const struct adf_tl_dbg_counter *ctr;
+ unsigned int i;
+ int ret;
+ u8 j;
+
+ if (!atomic_read(&telemetry->state)) {
+ dev_info(&GET_DEV(accel_dev), "not enabled\n");
+ return -EPERM;
+ }
+
+ dev_tl_counters = tl_data->dev_counters;
+
+ tl_print_msg_cnt(s, telemetry->msg_cnt);
+
+ /* Print device level telemetry. */
+ for (i = 0; i < num_dev_counters; i++) {
+ ctr = &dev_tl_counters[i];
+ ret = tl_calc_and_print_counter(telemetry, s, ctr, NULL);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "invalid counter type\n");
+ return ret;
+ }
+ }
+
+ /* Print per slice telemetry. */
+ for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
+ for (j = 0; j < sl_cnt[i]; j++) {
+ ret = tl_calc_and_print_sl_counters(accel_dev, s, i, j);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int tl_dev_data_show(struct seq_file *s, void *unused)
+{
+ struct adf_accel_dev *accel_dev = s->private;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ return tl_print_dev_data(accel_dev, s);
+}
+DEFINE_SHOW_ATTRIBUTE(tl_dev_data);
+
+static int tl_control_show(struct seq_file *s, void *unused)
+{
+ struct adf_accel_dev *accel_dev = s->private;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ seq_printf(s, "%d\n", atomic_read(&accel_dev->telemetry->state));
+
+ return 0;
+}
+
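+/*
+ * Writing 0 stops telemetry; writing a value in [1, num_hbuff] (re)starts it
+ * with that many history buffers.
+ */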
+static ssize_t tl_control_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq_f = file->private_data;
+ struct adf_accel_dev *accel_dev;
+ struct adf_telemetry *telemetry;
+ struct adf_tl_hw_data *tl_data;
+ struct device *dev;
+ u32 input;
+ int ret;
+
+ accel_dev = seq_f->private;
+ if (!accel_dev)
+ return -EINVAL;
+
+ tl_data = &GET_TL_DATA(accel_dev);
+ telemetry = accel_dev->telemetry;
+ dev = &GET_DEV(accel_dev);
+
+ mutex_lock(&telemetry->wr_lock);
+
+ ret = kstrtou32_from_user(userbuf, count, 10, &input);
+ if (ret)
+ goto unlock_and_exit;
+
+ if (input > tl_data->num_hbuff) {
+ dev_info(dev, "invalid control input\n");
+ ret = -EINVAL;
+ goto unlock_and_exit;
+ }
+
+ /* If input is 0, just stop telemetry. */
+ if (!input) {
+ ret = adf_tl_halt(accel_dev);
+ if (!ret)
+ ret = count;
+
+ goto unlock_and_exit;
+ }
+
+	/* If TL is already running, stop it before restarting with the new state. */
+ if (atomic_read(&telemetry->state)) {
+ dev_info(dev, "already enabled, restarting.\n");
+ ret = adf_tl_halt(accel_dev);
+ if (ret)
+ goto unlock_and_exit;
+ }
+
+ ret = adf_tl_run(accel_dev, input);
+ if (ret)
+ goto unlock_and_exit;
+
+ ret = count;
+
+unlock_and_exit:
+ mutex_unlock(&telemetry->wr_lock);
+ return ret;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(tl_control);
+
+static int get_rp_index_from_file(const struct file *f, u8 *rp_id, u8 rp_num)
+{
+ char alpha;
+ u8 index;
+ int ret;
+
+ ret = sscanf(f->f_path.dentry->d_name.name, ADF_TL_RP_REGS_FNAME, &alpha);
+ if (ret != 1)
+ return -EINVAL;
+
+	index = ADF_TL_DBG_RP_INDEX_ALPHA(alpha);
+	if (index >= rp_num)
+		return -EINVAL;
+
+	*rp_id = index;
+
+ return 0;
+}
+
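+/*
+ * Bind ring pair @new_rp_num to debugfs slot @rp_regs_index. If telemetry is
+ * running, it is stopped and restarted so the FW picks up the new mapping.
+ */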
+static int adf_tl_dbg_change_rp_index(struct adf_accel_dev *accel_dev,
+ unsigned int new_rp_num,
+ unsigned int rp_regs_index)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct device *dev = &GET_DEV(accel_dev);
+ unsigned int i;
+ u8 curr_state;
+ int ret;
+
+ if (new_rp_num >= hw_data->num_rps) {
+ dev_info(dev, "invalid Ring Pair number selected\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hw_data->tl_data.max_rp; i++) {
+ if (telemetry->rp_num_indexes[i] == new_rp_num) {
+ dev_info(dev, "RP nr: %d is already selected in slot rp_%c_data\n",
+ new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(i));
+ return 0;
+ }
+ }
+
+ dev_dbg(dev, "selecting RP nr %u into slot rp_%c_data\n",
+ new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index));
+
+ curr_state = atomic_read(&telemetry->state);
+
+ if (curr_state) {
+ ret = adf_tl_halt(accel_dev);
+ if (ret)
+ return ret;
+
+ telemetry->rp_num_indexes[rp_regs_index] = new_rp_num;
+
+ ret = adf_tl_run(accel_dev, curr_state);
+ if (ret)
+ return ret;
+ } else {
+ telemetry->rp_num_indexes[rp_regs_index] = new_rp_num;
+ }
+
+ return 0;
+}
+
+static void tl_print_rp_srv(struct adf_accel_dev *accel_dev, struct seq_file *s,
+ u8 rp_idx)
+{
+ u32 banks_per_vf = GET_HW_DATA(accel_dev)->num_banks_per_vf;
+ enum adf_cfg_service_type svc;
+
+ seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_SERVICE_TYPE);
+
+ svc = GET_SRV_TYPE(accel_dev, rp_idx % banks_per_vf);
+ switch (svc) {
+ case COMP:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_DC);
+ break;
+ case SYM:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_SYM);
+ break;
+ case ASYM:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_ASYM);
+ break;
+ default:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, TL_RP_SRV_UNKNOWN);
+ break;
+ }
+}
+
+static int tl_print_rp_data(struct adf_accel_dev *accel_dev, struct seq_file *s,
+ u8 rp_regs_index)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ const struct adf_tl_dbg_counter *rp_tl_counters;
+ u8 num_rp_counters = tl_data->num_rp_counters;
+ size_t rp_regs_sz = tl_data->rp_reg_sz;
+ struct adf_tl_dbg_counter ctr;
+ unsigned int i;
+ u8 rp_idx;
+ int ret;
+
+ if (!atomic_read(&telemetry->state)) {
+ dev_info(&GET_DEV(accel_dev), "not enabled\n");
+ return -EPERM;
+ }
+
+ rp_tl_counters = tl_data->rp_counters;
+ rp_idx = telemetry->rp_num_indexes[rp_regs_index];
+
+ if (rp_idx == ADF_TL_RP_REGS_DISABLED) {
+ dev_info(&GET_DEV(accel_dev), "no RP number selected in rp_%c_data\n",
+ ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index));
+ return -EPERM;
+ }
+
+ tl_print_msg_cnt(s, telemetry->msg_cnt);
+ seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_NUM_INDEX);
+ seq_printf(s, "%*d\n", TL_VALUE_MIN_PADDING, rp_idx);
+ tl_print_rp_srv(accel_dev, s, rp_idx);
+
+ for (i = 0; i < num_rp_counters; i++) {
+ ctr = rp_tl_counters[i];
+ ctr.offset1 += rp_regs_sz * rp_regs_index;
+ ctr.offset2 += rp_regs_sz * rp_regs_index;
+ ret = tl_calc_and_print_counter(telemetry, s, &ctr, NULL);
+ if (ret) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "invalid RP counter type\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int tl_rp_data_show(struct seq_file *s, void *unused)
+{
+ struct adf_accel_dev *accel_dev = s->private;
+ u8 rp_regs_index;
+ u8 max_rp;
+ int ret;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ max_rp = GET_TL_DATA(accel_dev).max_rp;
+ ret = get_rp_index_from_file(s->file, &rp_regs_index, max_rp);
+ if (ret) {
+ dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
+ return ret;
+ }
+
+ return tl_print_rp_data(accel_dev, s, rp_regs_index);
+}
+
+static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq_f = file->private_data;
+ struct adf_accel_dev *accel_dev;
+ struct adf_telemetry *telemetry;
+ unsigned int new_rp_num;
+ u8 rp_regs_index;
+ u8 max_rp;
+ int ret;
+
+ accel_dev = seq_f->private;
+ if (!accel_dev)
+ return -EINVAL;
+
+ telemetry = accel_dev->telemetry;
+ max_rp = GET_TL_DATA(accel_dev).max_rp;
+
+ mutex_lock(&telemetry->wr_lock);
+
+ ret = get_rp_index_from_file(file, &rp_regs_index, max_rp);
+ if (ret) {
+ dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
+ goto unlock_and_exit;
+ }
+
+ ret = kstrtou32_from_user(userbuf, count, 10, &new_rp_num);
+ if (ret)
+ goto unlock_and_exit;
+
+ ret = adf_tl_dbg_change_rp_index(accel_dev, new_rp_num, rp_regs_index);
+ if (ret)
+ goto unlock_and_exit;
+
+ ret = count;
+
+unlock_and_exit:
+ mutex_unlock(&telemetry->wr_lock);
+ return ret;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(tl_rp_data);
+
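+/*
+ * Example usage from userspace (the device node name is illustrative):
+ *   echo 2 > /sys/kernel/debug/qat_<bdf>/telemetry/control
+ *   cat /sys/kernel/debug/qat_<bdf>/telemetry/device_data
+ *   echo 6 > /sys/kernel/debug/qat_<bdf>/telemetry/rp_A_data
+ *   cat /sys/kernel/debug/qat_<bdf>/telemetry/rp_A_data
+ */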
+void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct dentry *parent = accel_dev->debugfs_dir;
+ u8 max_rp = GET_TL_DATA(accel_dev).max_rp;
+ char name[ADF_TL_RP_REGS_FNAME_SIZE];
+ struct dentry *dir;
+ unsigned int i;
+
+ if (!telemetry)
+ return;
+
+ dir = debugfs_create_dir("telemetry", parent);
+ accel_dev->telemetry->dbg_dir = dir;
+ debugfs_create_file("device_data", 0444, dir, accel_dev, &tl_dev_data_fops);
+ debugfs_create_file("control", 0644, dir, accel_dev, &tl_control_fops);
+
+ for (i = 0; i < max_rp; i++) {
+ snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME,
+ ADF_TL_DBG_RP_ALPHA_INDEX(i));
+ debugfs_create_file(name, 0644, dir, accel_dev, &tl_rp_data_fops);
+ }
+}
+
+void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct dentry *dbg_dir;
+
+ if (!telemetry)
+ return;
+
+ dbg_dir = telemetry->dbg_dir;
+
+ debugfs_remove_recursive(dbg_dir);
+
+ if (atomic_read(&telemetry->state))
+ adf_tl_halt(accel_dev);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
new file mode 100644
index 000000000000..11cc9eae19b3
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2023 Intel Corporation. */
+#ifndef ADF_TL_DEBUGFS_H
+#define ADF_TL_DEBUGFS_H
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+
+#define MAX_COUNT_NAME_SIZE 32
+#define SNAPSHOT_CNT_MSG "sample_cnt"
+#define RP_NUM_INDEX "rp_num"
+#define PCI_TRANS_CNT_NAME "pci_trans_cnt"
+#define MAX_RD_LAT_NAME "max_rd_lat"
+#define RD_LAT_ACC_NAME "rd_lat_acc_avg"
+#define MAX_LAT_NAME "max_gp_lat"
+#define LAT_ACC_NAME "gp_lat_acc_avg"
+#define BW_IN_NAME "bw_in"
+#define BW_OUT_NAME "bw_out"
+#define PAGE_REQ_LAT_NAME "at_page_req_lat_avg"
+#define AT_TRANS_LAT_NAME "at_trans_lat_avg"
+#define AT_MAX_UTLB_USED_NAME "at_max_tlb_used"
+#define AT_GLOB_DTLB_HIT_NAME "at_glob_devtlb_hit"
+#define AT_GLOB_DTLB_MISS_NAME "at_glob_devtlb_miss"
+#define AT_PAYLD_DTLB_HIT_NAME "tl_at_payld_devtlb_hit"
+#define AT_PAYLD_DTLB_MISS_NAME "tl_at_payld_devtlb_miss"
+#define RP_SERVICE_TYPE "service_type"
+
+#define ADF_TL_DBG_RP_ALPHA_INDEX(index) ((index) + 'A')
+#define ADF_TL_DBG_RP_INDEX_ALPHA(alpha) ((alpha) - 'A')
+
+#define ADF_TL_RP_REGS_FNAME "rp_%c_data"
+#define ADF_TL_RP_REGS_FNAME_SIZE 16
+
+#define ADF_TL_DATA_REG_OFF(reg, qat_gen) \
+ offsetof(struct adf_##qat_gen##_tl_layout, reg)
+
+#define ADF_TL_DEV_REG_OFF(reg, qat_gen) \
+ (ADF_TL_DATA_REG_OFF(tl_device_data_regs, qat_gen) + \
+ offsetof(struct adf_##qat_gen##_tl_device_data_regs, reg))
+
+#define ADF_TL_SLICE_REG_OFF(slice, reg, qat_gen) \
+ (ADF_TL_DEV_REG_OFF(slice##_slices[0], qat_gen) + \
+ offsetof(struct adf_##qat_gen##_tl_slice_data_regs, reg))
+
+#define ADF_TL_RP_REG_OFF(reg, qat_gen) \
+ (ADF_TL_DATA_REG_OFF(tl_ring_pairs_data_regs[0], qat_gen) + \
+ offsetof(struct adf_##qat_gen##_tl_ring_pair_data_regs, reg))
+
+/**
+ * enum adf_tl_counter_type - telemetry counter types
+ * @ADF_TL_COUNTER_UNSUPPORTED: unsupported counter
+ * @ADF_TL_SIMPLE_COUNT: simple counter
+ * @ADF_TL_COUNTER_NS: latency counter, value in ns
+ * @ADF_TL_COUNTER_NS_AVG: accumulated average latency counter, value in ns
+ * @ADF_TL_COUNTER_MBPS: bandwidth, value in Mbps
+ */
+enum adf_tl_counter_type {
+ ADF_TL_COUNTER_UNSUPPORTED,
+ ADF_TL_SIMPLE_COUNT,
+ ADF_TL_COUNTER_NS,
+ ADF_TL_COUNTER_NS_AVG,
+ ADF_TL_COUNTER_MBPS,
+};
+
+/**
+ * struct adf_tl_dbg_counter - telemetry counter definition
+ * @name: name of the counter as printed in the report
+ * @type: type of the counter
+ * @offset1: offset of 1st register
+ * @offset2: offset of 2nd optional register
+ */
+struct adf_tl_dbg_counter {
+ const char *name;
+ enum adf_tl_counter_type type;
+ size_t offset1;
+ size_t offset2;
+};
+
+#define ADF_TL_COUNTER(_name, _type, _offset) \
+{ .name = _name, \
+ .type = _type, \
+ .offset1 = _offset \
+}
+
+#define ADF_TL_COUNTER_LATENCY(_name, _type, _offset1, _offset2) \
+{ .name = _name, \
+ .type = _type, \
+ .offset1 = _offset1, \
+ .offset2 = _offset2 \
+}
+
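+/*
+ * Example (gen4 device counters, cf. adf_gen4_tl.c in this patch):
+ *   ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ *                  ADF_TL_DEV_REG_OFF(reg_tl_pci_trans_cnt, gen4))
+ */
+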
+/* Telemetry counter aggregated values. */
+struct adf_tl_dbg_aggr_values {
+ u64 curr;
+ u64 min;
+ u64 max;
+ u64 avg;
+};
+
+/**
+ * adf_tl_dbgfs_add() - Add telemetry debugfs entries.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Creates the telemetry debugfs folder and attributes in the QAT debugfs root.
+ */
+void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev);
+
+/**
+ * adf_tl_dbgfs_rm() - Remove telemetry debugfs entries.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Removes the telemetry debugfs folder and attributes from the QAT debugfs root.
+ */
+void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_TL_DEBUGFS_H */
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
index cd418b51d9f3..63cf18e2a4e5 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
@@ -29,6 +29,8 @@ enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_RL_ADD = 134,
ICP_QAT_FW_RL_UPDATE = 135,
ICP_QAT_FW_RL_REMOVE = 136,
+ ICP_QAT_FW_TL_START = 137,
+ ICP_QAT_FW_TL_STOP = 138,
};
enum icp_qat_fw_init_admin_resp_status {
@@ -36,6 +38,13 @@ enum icp_qat_fw_init_admin_resp_status {
ICP_QAT_FW_INIT_RESP_STATUS_FAIL
};
+struct icp_qat_fw_init_admin_tl_rp_indexes {
+ __u8 rp_num_index_0;
+ __u8 rp_num_index_1;
+ __u8 rp_num_index_2;
+ __u8 rp_num_index_3;
+};
+
struct icp_qat_fw_init_admin_slice_cnt {
__u8 cpr_cnt;
__u8 xlt_cnt;
@@ -87,6 +96,7 @@ struct icp_qat_fw_init_admin_req {
__u8 rp_count;
};
__u32 idle_filter;
+ struct icp_qat_fw_init_admin_tl_rp_indexes rp_indexes;
};
__u32 resrvd4;
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
index eb2ef225bcee..b8f1c4ffb8b5 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
@@ -18,7 +18,12 @@ enum icp_qat_hw_ae_id {
ICP_QAT_HW_AE_9 = 9,
ICP_QAT_HW_AE_10 = 10,
ICP_QAT_HW_AE_11 = 11,
- ICP_QAT_HW_AE_DELIMITER = 12
+ ICP_QAT_HW_AE_12 = 12,
+ ICP_QAT_HW_AE_13 = 13,
+ ICP_QAT_HW_AE_14 = 14,
+ ICP_QAT_HW_AE_15 = 15,
+ ICP_QAT_HW_AE_16 = 16,
+ ICP_QAT_HW_AE_DELIMITER = 17
};
enum icp_qat_hw_qat_id {
@@ -95,7 +100,7 @@ enum icp_qat_capabilities_mask {
/* Bits 10-11 are currently reserved */
ICP_ACCEL_CAPABILITIES_HKDF = BIT(12),
ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13),
- /* Bit 14 is currently reserved */
+ ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN = BIT(14),
ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15),
ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16),
ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17),
@@ -107,7 +112,10 @@ enum icp_qat_capabilities_mask {
ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23),
ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24),
ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25),
- ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26)
+ ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26),
+ /* Bits 27-28 are currently reserved */
+ ICP_ACCEL_CAPABILITIES_ZUC_256 = BIT(29),
+ ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT = BIT(30),
};
#define QAT_AUTH_MODE_BITPOS 4
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
index 69482abdb8b9..e28241bdd0f4 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
@@ -7,7 +7,7 @@
#define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000
#define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000
#define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000
-#define ICP_QAT_UCLO_MAX_AE 12
+#define ICP_QAT_UCLO_MAX_AE 17
#define ICP_QAT_UCLO_MAX_CTX 8
#define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
#define ICP_QAT_UCLO_MAX_USTORE 0x4000
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
index cbb946a80076..317cafa9d11f 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -697,12 +697,16 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
case ADF_402XX_PCI_DEVICE_ID:
+ case ADF_420XX_PCI_DEVICE_ID:
handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = false;
handle->chip_info->lm2lm3 = true;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
- handle->chip_info->icp_rst_mask = 0x100015;
+ if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID)
+ handle->chip_info->icp_rst_mask = 0x100155;
+ else
+ handle->chip_info->icp_rst_mask = 0x100015;
handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0;
handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX;
handle->chip_info->wakeup_event_val = 0x80000000;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index e27ea7e28c51..ad2c64af7427 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -733,6 +733,7 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
case ADF_402XX_PCI_DEVICE_ID:
+ case ADF_420XX_PCI_DEVICE_ID:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",