author    Linus Torvalds <torvalds@linux-foundation.org> 2021-02-24 21:25:37 +0300
committer Linus Torvalds <torvalds@linux-foundation.org> 2021-02-24 21:25:37 +0300
commit    e229b429bb4af24d9828758c0c851bb6a4169400 (patch)
tree      95e49922f6c68b5f81cbf7a39349cfad42c5a0f1 /drivers/misc
parent    7ac1161c2789be25d0d206e831b051f43028866e (diff)
parent    d19db80a366576d3ffadf2508ed876b4c1faf959 (diff)
Merge tag 'char-misc-5.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here is the large set of char/misc/whatever driver subsystem updates
  for 5.12-rc1. Over time it seems like this tree is collecting more
  and more tiny driver subsystems in one place, making it easier for
  those maintainers, which is why this is getting larger.

  Included in here are:

   - coresight driver updates
   - habanalabs driver updates
   - virtual acrn driver addition (proper acks from the x86 maintainers)
   - broadcom misc driver addition
   - speakup driver updates
   - soundwire driver updates
   - fpga driver updates
   - amba driver updates
   - mei driver updates
   - vfio driver updates
   - greybus driver updates
   - nvmem driver updates
   - phy driver updates
   - mhi driver updates
   - interconnect driver updates
   - fsl-mc bus driver updates
   - random driver fix
   - some small misc driver updates (rtsx, pvpanic, etc.)

  All of these have been in linux-next for a while, with the only
  reported issue being a merge conflict due to the dfl_device_id
  addition from the fpga subsystem in here"

* tag 'char-misc-5.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (311 commits)
  spmi: spmi-pmic-arb: Fix hw_irq overflow
  Documentation: coresight: Add PID tracing description
  coresight: etm-perf: Support PID tracing for kernel at EL2
  coresight: etm-perf: Clarify comment on perf options
  ACRN: update MAINTAINERS: mailing list is subscribers-only
  regmap: sdw-mbq: use MODULE_LICENSE("GPL")
  regmap: sdw: use no_pm routines for SoundWire 1.2 MBQ
  regmap: sdw: use _no_pm functions in regmap_read/write
  soundwire: intel: fix possible crash when no device is detected
  MAINTAINERS: replace my with email with replacements
  mhi: Fix double dma free
  uapi: map_to_7segment: Update example in documentation
  uio: uio_pci_generic: don't fail probe if pdev->irq equals to IRQ_NOTCONNECTED
  drivers/misc/vmw_vmci: restrict too big queue size in qp_host_alloc_queue
  firewire: replace tricky statement by two simple ones
  vme: make remove callback return void
  firmware: google: make coreboot driver's remove callback return void
  firmware: xilinx: Use explicit values for all enum values
  sample/acrn: Introduce a sample of HSM ioctl interface usage
  virt: acrn: Introduce an interface for Service VM to control vCPU
  ...
Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/Kconfig | 22
-rw-r--r--  drivers/misc/Makefile | 3
-rw-r--r--  drivers/misc/atmel_tclib.c | 200
-rw-r--r--  drivers/misc/bcm-vk/Kconfig | 29
-rw-r--r--  drivers/misc/bcm-vk/Makefile | 12
-rw-r--r--  drivers/misc/bcm-vk/bcm_vk.h | 549
-rw-r--r--  drivers/misc/bcm-vk/bcm_vk_dev.c | 1652
-rw-r--r--  drivers/misc/bcm-vk/bcm_vk_msg.c | 1357
-rw-r--r--  drivers/misc/bcm-vk/bcm_vk_msg.h | 163
-rw-r--r--  drivers/misc/bcm-vk/bcm_vk_sg.c | 275
-rw-r--r--  drivers/misc/bcm-vk/bcm_vk_sg.h | 61
-rw-r--r--  drivers/misc/bcm-vk/bcm_vk_tty.c | 339
-rw-r--r--  drivers/misc/cardreader/rts5227.c | 5
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.c | 9
-rw-r--r--  drivers/misc/cxl/sysfs.c | 2
-rw-r--r--  drivers/misc/eeprom/eeprom_93xx46.c | 17
-rw-r--r--  drivers/misc/fastrpc.c | 7
-rw-r--r--  drivers/misc/habanalabs/common/Makefile | 10
-rw-r--r--  drivers/misc/habanalabs/common/asid.c | 6
-rw-r--r--  drivers/misc/habanalabs/common/command_buffer.c | 8
-rw-r--r--  drivers/misc/habanalabs/common/command_submission.c | 473
-rw-r--r--  drivers/misc/habanalabs/common/context.c | 33
-rw-r--r--  drivers/misc/habanalabs/common/debugfs.c | 43
-rw-r--r--  drivers/misc/habanalabs/common/device.c | 46
-rw-r--r--  drivers/misc/habanalabs/common/firmware_if.c | 157
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs.h | 109
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs_ioctl.c | 25
-rw-r--r--  drivers/misc/habanalabs/common/hw_queue.c | 51
-rw-r--r--  drivers/misc/habanalabs/common/memory.c | 621
-rw-r--r--  drivers/misc/habanalabs/common/mmu/Makefile | 2
-rw-r--r--  drivers/misc/habanalabs/common/mmu/mmu.c (renamed from drivers/misc/habanalabs/common/mmu.c) | 124
-rw-r--r--  drivers/misc/habanalabs/common/mmu/mmu_v1.c (renamed from drivers/misc/habanalabs/common/mmu_v1.c) | 4
-rw-r--r--  drivers/misc/habanalabs/common/pci/Makefile | 2
-rw-r--r--  drivers/misc/habanalabs/common/pci/pci.c (renamed from drivers/misc/habanalabs/common/pci.c) | 47
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi.c | 481
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudiP.h | 3
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi_coresight.c | 18
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi_security.c | 5
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 106
-rw-r--r--  drivers/misc/habanalabs/goya/goyaP.h | 1
-rw-r--r--  drivers/misc/habanalabs/goya/goya_coresight.c | 11
-rw-r--r--  drivers/misc/habanalabs/goya/goya_security.c | 5
-rw-r--r--  drivers/misc/habanalabs/include/common/cpucp_if.h | 14
-rw-r--r--  drivers/misc/habanalabs/include/common/hl_boot_if.h | 19
-rw-r--r--  drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h | 4
-rw-r--r--  drivers/misc/habanalabs/include/gaudi/gaudi_masks.h | 5
-rw-r--r--  drivers/misc/habanalabs/include/gaudi/gaudi_packets.h | 27
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h | 5
-rw-r--r--  drivers/misc/mei/bus.c | 24
-rw-r--r--  drivers/misc/mei/client.c | 291
-rw-r--r--  drivers/misc/mei/client.h | 8
-rw-r--r--  drivers/misc/mei/debugfs.c | 1
-rw-r--r--  drivers/misc/mei/hbm.c | 165
-rw-r--r--  drivers/misc/mei/hbm.h | 4
-rw-r--r--  drivers/misc/mei/hdcp/mei_hdcp.c | 7
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 5
-rw-r--r--  drivers/misc/mei/hw.h | 61
-rw-r--r--  drivers/misc/mei/init.c | 5
-rw-r--r--  drivers/misc/mei/interrupt.c | 43
-rw-r--r--  drivers/misc/mei/main.c | 2
-rw-r--r--  drivers/misc/mei/mei_dev.h | 18
-rw-r--r--  drivers/misc/mei/pci-me.c | 5
-rw-r--r--  drivers/misc/pti.c | 978
-rw-r--r--  drivers/misc/pvpanic.c | 59
-rw-r--r--  drivers/misc/sgi-xp/xpnet.c | 4
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.c | 19
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.h | 2
67 files changed, 6981 insertions, 1887 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index fafa8b0d8099..f532c59bb59b 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -50,14 +50,6 @@ config AD525X_DPOT_SPI
To compile this driver as a module, choose M here: the
module will be called ad525x_dpot-spi.
-config ATMEL_TCLIB
- bool "Atmel AT32/AT91 Timer/Counter Library"
- depends on ARCH_AT91
- help
- Select this if you want a library to allocate the Timer/Counter
- blocks found on many Atmel processors. This facilitates using
- these blocks by different drivers despite processor differences.
-
config DUMMY_IRQ
tristate "Dummy IRQ handler"
help
@@ -112,19 +104,6 @@ config PHANTOM
If you choose to build module, its name will be phantom. If unsure,
say N here.
-config INTEL_MID_PTI
- tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard"
- depends on PCI && TTY && (X86_INTEL_MID || COMPILE_TEST)
- help
- The PTI (Parallel Trace Interface) driver directs
- trace data routed from various parts in the system out
- through an Intel Penwell PTI port and out of the mobile
- device for analysis with a debugging tool (Lauterbach or Fido).
-
- You should select this driver if the target kernel is meant for
- an Intel Atom (non-netbook) mobile device containing a MIPI
- P1149.7 standard implementation.
-
config TIFM_CORE
tristate "TI Flash Media interface support"
depends on PCI
@@ -478,6 +457,7 @@ source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
source "drivers/misc/ocxl/Kconfig"
+source "drivers/misc/bcm-vk/Kconfig"
source "drivers/misc/cardreader/Kconfig"
source "drivers/misc/habanalabs/Kconfig"
source "drivers/misc/uacce/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d23231e73330..99b6f15a3c70 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -8,9 +8,7 @@ obj-$(CONFIG_IBMVMC) += ibmvmc.o
obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o
obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
-obj-$(CONFIG_INTEL_MID_PTI) += pti.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
-obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
obj-$(CONFIG_DUMMY_IRQ) += dummy-irq.o
obj-$(CONFIG_ICS932S401) += ics932s401.o
obj-$(CONFIG_LKDTM) += lkdtm/
@@ -51,6 +49,7 @@ obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
obj-$(CONFIG_OCXL) += ocxl/
+obj-$(CONFIG_BCM_VK) += bcm-vk/
obj-y += cardreader/
obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_HABANA_AI) += habanalabs/
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c
deleted file mode 100644
index 7de7840f613c..000000000000
--- a/drivers/misc/atmel_tclib.c
+++ /dev/null
@@ -1,200 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/of.h>
-#include <soc/at91/atmel_tcb.h>
-
-/*
- * This is a thin library to solve the problem of how to portably allocate
- * one of the TC blocks. For simplicity, it doesn't currently expect to
- * share individual timers between different drivers.
- */
-
-#if defined(CONFIG_AVR32)
-/* AVR32 has these divide PBB */
-const u8 atmel_tc_divisors[5] = { 0, 4, 8, 16, 32, };
-EXPORT_SYMBOL(atmel_tc_divisors);
-
-#elif defined(CONFIG_ARCH_AT91)
-/* AT91 has these divide MCK */
-const u8 atmel_tc_divisors[5] = { 2, 8, 32, 128, 0, };
-EXPORT_SYMBOL(atmel_tc_divisors);
-
-#endif
-
-static DEFINE_SPINLOCK(tc_list_lock);
-static LIST_HEAD(tc_list);
-
-/**
- * atmel_tc_alloc - allocate a specified TC block
- * @block: which block to allocate
- *
- * Caller allocates a block. If it is available, a pointer to a
- * pre-initialized struct atmel_tc is returned. The caller can access
- * the registers directly through the "regs" field.
- */
-struct atmel_tc *atmel_tc_alloc(unsigned block)
-{
- struct atmel_tc *tc;
- struct platform_device *pdev = NULL;
-
- spin_lock(&tc_list_lock);
- list_for_each_entry(tc, &tc_list, node) {
- if (tc->allocated)
- continue;
-
- if ((tc->pdev->dev.of_node && tc->id == block) ||
- (tc->pdev->id == block)) {
- pdev = tc->pdev;
- tc->allocated = true;
- break;
- }
- }
- spin_unlock(&tc_list_lock);
-
- return pdev ? tc : NULL;
-}
-EXPORT_SYMBOL_GPL(atmel_tc_alloc);
-
-/**
- * atmel_tc_free - release a specified TC block
- * @tc: Timer/counter block that was returned by atmel_tc_alloc()
- *
- * This reverses the effect of atmel_tc_alloc(), invalidating the resource
- * returned by that routine and making the TC available to other drivers.
- */
-void atmel_tc_free(struct atmel_tc *tc)
-{
- spin_lock(&tc_list_lock);
- if (tc->allocated)
- tc->allocated = false;
- spin_unlock(&tc_list_lock);
-}
-EXPORT_SYMBOL_GPL(atmel_tc_free);
-
-#if defined(CONFIG_OF)
-static struct atmel_tcb_config tcb_rm9200_config = {
- .counter_width = 16,
-};
-
-static struct atmel_tcb_config tcb_sam9x5_config = {
- .counter_width = 32,
-};
-
-static const struct of_device_id atmel_tcb_dt_ids[] = {
- {
- .compatible = "atmel,at91rm9200-tcb",
- .data = &tcb_rm9200_config,
- }, {
- .compatible = "atmel,at91sam9x5-tcb",
- .data = &tcb_sam9x5_config,
- }, {
- /* sentinel */
- }
-};
-
-MODULE_DEVICE_TABLE(of, atmel_tcb_dt_ids);
-#endif
-
-static int __init tc_probe(struct platform_device *pdev)
-{
- struct atmel_tc *tc;
- struct clk *clk;
- int irq;
- unsigned int i;
-
- if (of_get_child_count(pdev->dev.of_node))
- return -EBUSY;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -EINVAL;
-
- tc = devm_kzalloc(&pdev->dev, sizeof(struct atmel_tc), GFP_KERNEL);
- if (!tc)
- return -ENOMEM;
-
- tc->pdev = pdev;
-
- clk = devm_clk_get(&pdev->dev, "t0_clk");
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- tc->slow_clk = devm_clk_get(&pdev->dev, "slow_clk");
- if (IS_ERR(tc->slow_clk))
- return PTR_ERR(tc->slow_clk);
-
- tc->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(tc->regs))
- return PTR_ERR(tc->regs);
-
- /* Now take SoC information if available */
- if (pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_node(atmel_tcb_dt_ids, pdev->dev.of_node);
- if (match)
- tc->tcb_config = match->data;
-
- tc->id = of_alias_get_id(tc->pdev->dev.of_node, "tcb");
- } else {
- tc->id = pdev->id;
- }
-
- tc->clk[0] = clk;
- tc->clk[1] = devm_clk_get(&pdev->dev, "t1_clk");
- if (IS_ERR(tc->clk[1]))
- tc->clk[1] = clk;
- tc->clk[2] = devm_clk_get(&pdev->dev, "t2_clk");
- if (IS_ERR(tc->clk[2]))
- tc->clk[2] = clk;
-
- tc->irq[0] = irq;
- tc->irq[1] = platform_get_irq(pdev, 1);
- if (tc->irq[1] < 0)
- tc->irq[1] = irq;
- tc->irq[2] = platform_get_irq(pdev, 2);
- if (tc->irq[2] < 0)
- tc->irq[2] = irq;
-
- for (i = 0; i < 3; i++)
- writel(ATMEL_TC_ALL_IRQ, tc->regs + ATMEL_TC_REG(i, IDR));
-
- spin_lock(&tc_list_lock);
- list_add_tail(&tc->node, &tc_list);
- spin_unlock(&tc_list_lock);
-
- platform_set_drvdata(pdev, tc);
-
- return 0;
-}
-
-static void tc_shutdown(struct platform_device *pdev)
-{
- int i;
- struct atmel_tc *tc = platform_get_drvdata(pdev);
-
- for (i = 0; i < 3; i++)
- writel(ATMEL_TC_ALL_IRQ, tc->regs + ATMEL_TC_REG(i, IDR));
-}
-
-static struct platform_driver tc_driver = {
- .driver = {
- .name = "atmel_tcb",
- .of_match_table = of_match_ptr(atmel_tcb_dt_ids),
- },
- .shutdown = tc_shutdown,
-};
-
-static int __init tc_init(void)
-{
- return platform_driver_probe(&tc_driver, tc_probe);
-}
-arch_initcall(tc_init);
diff --git a/drivers/misc/bcm-vk/Kconfig b/drivers/misc/bcm-vk/Kconfig
new file mode 100644
index 000000000000..68a972772b99
--- /dev/null
+++ b/drivers/misc/bcm-vk/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Broadcom VK device
+#
+config BCM_VK
+ tristate "Support for Broadcom VK Accelerators"
+ depends on PCI_MSI
+ help
+ Select this option to enable support for Broadcom
+ VK Accelerators. VK is used to perform multiple specific
+ offload processing tasks in parallel, assisting in operations
+ such as video transcoding, compression, and crypto.
+ This driver enables userspace programs to access these
+ accelerators via /dev/bcm-vk.N devices.
+
+ If unsure, say N.
+
+config BCM_VK_TTY
+ bool "Enable tty ports on a Broadcom VK Accelerator device"
+ depends on TTY
+ depends on BCM_VK
+ help
+ Select this option to enable tty support to allow console
+ access to Broadcom VK Accelerator cards from host.
+
+ Device nodes will be in the form /dev/bcm-vk.x_ttyVKy where:
+ x is the instance of the VK card
+ y is the tty device number on the VK card.
diff --git a/drivers/misc/bcm-vk/Makefile b/drivers/misc/bcm-vk/Makefile
new file mode 100644
index 000000000000..1df2ebe851ca
--- /dev/null
+++ b/drivers/misc/bcm-vk/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Broadcom VK driver
+#
+
+obj-$(CONFIG_BCM_VK) += bcm_vk.o
+bcm_vk-objs := \
+ bcm_vk_dev.o \
+ bcm_vk_msg.o \
+ bcm_vk_sg.o
+
+bcm_vk-$(CONFIG_BCM_VK_TTY) += bcm_vk_tty.o
diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h
new file mode 100644
index 000000000000..a1338f375589
--- /dev/null
+++ b/drivers/misc/bcm-vk/bcm_vk.h
@@ -0,0 +1,549 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+
+#ifndef BCM_VK_H
+#define BCM_VK_H
+
+#include <linux/atomic.h>
+#include <linux/firmware.h>
+#include <linux/irq.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/sched/signal.h>
+#include <linux/tty.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/misc/bcm_vk.h>
+
+#include "bcm_vk_msg.h"
+
+#define DRV_MODULE_NAME "bcm-vk"
+
+/*
+ * Load Image is completed in two stages:
+ *
+ * 1) When the VK device boots up, the M7 CPU runs and executes the BootROM.
+ * The Secure Boot Loader (SBL), as part of the BootROM, will run
+ * to open up ITCM for the host to push the BOOT1 image.
+ * SBL will authenticate the image before jumping to the BOOT1 image.
+ *
+ * 2) Because the BOOT1 image is a secured image, it is also called the
+ * Secure Boot Image (SBI). At the second stage, SBI will initialize DDR
+ * and wait for the host to push the BOOT2 image to DDR.
+ * SBI will authenticate the image before jumping to the BOOT2 image.
+ *
+ */
+/* Location of registers of interest in BAR0 */
+
+/* Request register for Secure Boot Loader (SBL) download */
+#define BAR_CODEPUSH_SBL 0x400
+/* Start of ITCM */
+#define CODEPUSH_BOOT1_ENTRY 0x00400000
+#define CODEPUSH_MASK 0xfffff000
+#define CODEPUSH_BOOTSTART BIT(0)
+
+/* Boot Status register */
+#define BAR_BOOT_STATUS 0x404
+
+#define SRAM_OPEN BIT(16)
+#define DDR_OPEN BIT(17)
+
+/* Firmware loader progress status definitions */
+#define FW_LOADER_ACK_SEND_MORE_DATA BIT(18)
+#define FW_LOADER_ACK_IN_PROGRESS BIT(19)
+#define FW_LOADER_ACK_RCVD_ALL_DATA BIT(20)
+
+/* Boot1/2 is running in standalone mode */
+#define BOOT_STDALONE_RUNNING BIT(21)
+
+/* definitions for boot status register */
+#define BOOT_STATE_MASK (0xffffffff & \
+ ~(FW_LOADER_ACK_SEND_MORE_DATA | \
+ FW_LOADER_ACK_IN_PROGRESS | \
+ BOOT_STDALONE_RUNNING))
+
+#define BOOT_ERR_SHIFT 4
+#define BOOT_ERR_MASK (0xf << BOOT_ERR_SHIFT)
+#define BOOT_PROG_MASK 0xf
+
+#define BROM_STATUS_NOT_RUN 0x2
+#define BROM_NOT_RUN (SRAM_OPEN | BROM_STATUS_NOT_RUN)
+#define BROM_STATUS_COMPLETE 0x6
+#define BROM_RUNNING (SRAM_OPEN | BROM_STATUS_COMPLETE)
+#define BOOT1_STATUS_COMPLETE 0x6
+#define BOOT1_RUNNING (DDR_OPEN | BOOT1_STATUS_COMPLETE)
+#define BOOT2_STATUS_COMPLETE 0x6
+#define BOOT2_RUNNING (FW_LOADER_ACK_RCVD_ALL_DATA | \
+ BOOT2_STATUS_COMPLETE)
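/*
 * Editor's note - illustrative sketch only, not part of this patch: how a
 * raw BAR_BOOT_STATUS word decomposes with the masks above. The helper
 * name is hypothetical.
 */
static inline void bcm_vk_example_decode_boot_status(u32 boot_status)
{
	u32 err  = (boot_status & BOOT_ERR_MASK) >> BOOT_ERR_SHIFT;
	u32 prog = boot_status & BOOT_PROG_MASK;

	/* e.g. BROM_RUNNING == (SRAM_OPEN | 0x6): SRAM open, BROM complete */
	pr_debug("boot err=0x%x progress=0x%x\n", err, prog);
}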
+
+/* Boot request for Secure Boot Image (SBI) */
+#define BAR_CODEPUSH_SBI 0x408
+/* 64M mapped to BAR2 */
+#define CODEPUSH_BOOT2_ENTRY 0x60000000
+
+#define BAR_CARD_STATUS 0x410
+/* CARD_STATUS definitions */
+#define CARD_STATUS_TTYVK0_READY BIT(0)
+#define CARD_STATUS_TTYVK1_READY BIT(1)
+
+#define BAR_BOOT1_STDALONE_PROGRESS 0x420
+#define BOOT1_STDALONE_SUCCESS (BIT(13) | BIT(14))
+#define BOOT1_STDALONE_PROGRESS_MASK BOOT1_STDALONE_SUCCESS
+
+#define BAR_METADATA_VERSION 0x440
+#define BAR_OS_UPTIME 0x444
+#define BAR_CHIP_ID 0x448
+#define MAJOR_SOC_REV(_chip_id) (((_chip_id) >> 20) & 0xf)
+
+#define BAR_CARD_TEMPERATURE 0x45c
+/* defines for all temperature sensors */
+#define BCM_VK_TEMP_FIELD_MASK 0xff
+#define BCM_VK_CPU_TEMP_SHIFT 0
+#define BCM_VK_DDR0_TEMP_SHIFT 8
+#define BCM_VK_DDR1_TEMP_SHIFT 16
+
+#define BAR_CARD_VOLTAGE 0x460
+/* defines for voltage rail conversion */
+#define BCM_VK_VOLT_RAIL_MASK 0xffff
+#define BCM_VK_3P3_VOLT_REG_SHIFT 16
+
+#define BAR_CARD_ERR_LOG 0x464
+/* Error log register bit definition - register for error alerts */
+#define ERR_LOG_UECC BIT(0)
+#define ERR_LOG_SSIM_BUSY BIT(1)
+#define ERR_LOG_AFBC_BUSY BIT(2)
+#define ERR_LOG_HIGH_TEMP_ERR BIT(3)
+#define ERR_LOG_WDOG_TIMEOUT BIT(4)
+#define ERR_LOG_SYS_FAULT BIT(5)
+#define ERR_LOG_RAMDUMP BIT(6)
+#define ERR_LOG_COP_WDOG_TIMEOUT BIT(7)
+/* warnings */
+#define ERR_LOG_MEM_ALLOC_FAIL BIT(8)
+#define ERR_LOG_LOW_TEMP_WARN BIT(9)
+#define ERR_LOG_ECC BIT(10)
+#define ERR_LOG_IPC_DWN BIT(11)
+
+/* Alert bit definitions detected on host */
+#define ERR_LOG_HOST_INTF_V_FAIL BIT(13)
+#define ERR_LOG_HOST_HB_FAIL BIT(14)
+#define ERR_LOG_HOST_PCIE_DWN BIT(15)
+
+#define BAR_CARD_ERR_MEM 0x468
+/* defines for mem err, all fields have same width */
+#define BCM_VK_MEM_ERR_FIELD_MASK 0xff
+#define BCM_VK_ECC_MEM_ERR_SHIFT 0
+#define BCM_VK_UECC_MEM_ERR_SHIFT 8
+/* threshold of event occurrences at which logs start to come out */
+#define BCM_VK_ECC_THRESHOLD 10
+#define BCM_VK_UECC_THRESHOLD 1
+
+#define BAR_CARD_PWR_AND_THRE 0x46c
+/* defines for power and temp threshold, all fields have same width */
+#define BCM_VK_PWR_AND_THRE_FIELD_MASK 0xff
+#define BCM_VK_LOW_TEMP_THRE_SHIFT 0
+#define BCM_VK_HIGH_TEMP_THRE_SHIFT 8
+#define BCM_VK_PWR_STATE_SHIFT 16
+
+#define BAR_CARD_STATIC_INFO 0x470
+
+#define BAR_INTF_VER 0x47c
+#define BAR_INTF_VER_MAJOR_SHIFT 16
+#define BAR_INTF_VER_MASK 0xffff
+/*
+ * major and minor semantic version numbers supported
+ * Please update as required on interface changes
+ */
+#define SEMANTIC_MAJOR 1
+#define SEMANTIC_MINOR 0
+
+/*
+ * first doorbell reg, i.e. for queue 0. Only the first one is needed, as
+ * the queue number is used to derive the others
+ */
+#define VK_BAR0_REGSEG_DB_BASE 0x484
+#define VK_BAR0_REGSEG_DB_REG_GAP 8 /*
+ * DB register gap,
+ * DB1 at 0x48c and DB2 at 0x494
+ */
+
+/* reset register and specific values */
+#define VK_BAR0_RESET_DB_NUM 3
+#define VK_BAR0_RESET_DB_SOFT 0xffffffff
+#define VK_BAR0_RESET_DB_HARD 0xfffffffd
+#define VK_BAR0_RESET_RAMPDUMP 0xa0000000
+
+#define VK_BAR0_Q_DB_BASE(q_num) (VK_BAR0_REGSEG_DB_BASE + \
+ ((q_num) * VK_BAR0_REGSEG_DB_REG_GAP))
+#define VK_BAR0_RESET_DB_BASE (VK_BAR0_REGSEG_DB_BASE + \
+ (VK_BAR0_RESET_DB_NUM * VK_BAR0_REGSEG_DB_REG_GAP))
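/*
 * Editor's note - illustrative sketch only, not part of this patch: the
 * doorbell layout the two macros above produce. With an 8-byte register
 * gap, VK_BAR0_Q_DB_BASE(q) yields 0x484, 0x48c and 0x494 for queues
 * 0..2, and VK_BAR0_RESET_DB_BASE lands at 0x49c (doorbell 3).
 */
static inline u32 bcm_vk_example_db_offset(u32 q_num)
{
	return VK_BAR0_Q_DB_BASE(q_num);	/* 0x484 + q_num * 8 */
}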
+
+#define BAR_BOOTSRC_SELECT 0xc78
+/* BOOTSRC definitions */
+#define BOOTSRC_SOFT_ENABLE BIT(14)
+
+/* Card OS Firmware version size */
+#define BAR_FIRMWARE_TAG_SIZE 50
+#define FIRMWARE_STATUS_PRE_INIT_DONE 0x1f
+
+/* VK MSG_ID defines */
+#define VK_MSG_ID_BITMAP_SIZE 4096
+#define VK_MSG_ID_BITMAP_MASK (VK_MSG_ID_BITMAP_SIZE - 1)
+#define VK_MSG_ID_OVERFLOW 0xffff
+
+/*
+ * BAR1
+ */
+
+/* BAR1 message q definition */
+
+/* indicate if msgq ctrl in BAR1 is populated */
+#define VK_BAR1_MSGQ_DEF_RDY 0x60c0
+/* ready marker value for the above location, normal boot2 */
+#define VK_BAR1_MSGQ_RDY_MARKER 0xbeefcafe
+/* ready marker value for the above location, normal boot2 */
+#define VK_BAR1_DIAG_RDY_MARKER 0xdeadcafe
+/* number of msgqs in BAR1 */
+#define VK_BAR1_MSGQ_NR 0x60c4
+/* BAR1 queue control structure offset */
+#define VK_BAR1_MSGQ_CTRL_OFF 0x60c8
+
+/* BAR1 ucode and boot1 version tag */
+#define VK_BAR1_UCODE_VER_TAG 0x6170
+#define VK_BAR1_BOOT1_VER_TAG 0x61b0
+#define VK_BAR1_VER_TAG_SIZE 64
+
+/* Memory to hold the DMA buffer memory address allocated for boot2 download */
+#define VK_BAR1_DMA_BUF_OFF_HI 0x61e0
+#define VK_BAR1_DMA_BUF_OFF_LO (VK_BAR1_DMA_BUF_OFF_HI + 4)
+#define VK_BAR1_DMA_BUF_SZ (VK_BAR1_DMA_BUF_OFF_HI + 8)
+
+/* Scratch memory allocated on host for VK */
+#define VK_BAR1_SCRATCH_OFF_HI 0x61f0
+#define VK_BAR1_SCRATCH_OFF_LO (VK_BAR1_SCRATCH_OFF_HI + 4)
+#define VK_BAR1_SCRATCH_SZ_ADDR (VK_BAR1_SCRATCH_OFF_HI + 8)
+#define VK_BAR1_SCRATCH_DEF_NR_PAGES 32
+
+/* BAR1 DAUTH info */
+#define VK_BAR1_DAUTH_BASE_ADDR 0x6200
+#define VK_BAR1_DAUTH_STORE_SIZE 0x48
+#define VK_BAR1_DAUTH_VALID_SIZE 0x8
+#define VK_BAR1_DAUTH_MAX 4
+#define VK_BAR1_DAUTH_STORE_ADDR(x) \
+ (VK_BAR1_DAUTH_BASE_ADDR + \
+ (x) * (VK_BAR1_DAUTH_STORE_SIZE + VK_BAR1_DAUTH_VALID_SIZE))
+#define VK_BAR1_DAUTH_VALID_ADDR(x) \
+ (VK_BAR1_DAUTH_STORE_ADDR(x) + VK_BAR1_DAUTH_STORE_SIZE)
+
+/* BAR1 SOTP AUTH and REVID info */
+#define VK_BAR1_SOTP_REVID_BASE_ADDR 0x6340
+#define VK_BAR1_SOTP_REVID_SIZE 0x10
+#define VK_BAR1_SOTP_REVID_MAX 2
+#define VK_BAR1_SOTP_REVID_ADDR(x) \
+ (VK_BAR1_SOTP_REVID_BASE_ADDR + (x) * VK_BAR1_SOTP_REVID_SIZE)
+
+/* VK device supports a maximum of 3 BARs */
+#define MAX_BAR 3
+
+/* default number of msg blk for inband SGL */
+#define BCM_VK_DEF_IB_SGL_BLK_LEN 16
+#define BCM_VK_IB_SGL_BLK_MAX 24
+
+enum pci_barno {
+ BAR_0 = 0,
+ BAR_1,
+ BAR_2
+};
+
+#ifdef CONFIG_BCM_VK_TTY
+#define BCM_VK_NUM_TTY 2
+#else
+#define BCM_VK_NUM_TTY 0
+#endif
+
+struct bcm_vk_tty {
+ struct tty_port port;
+ u32 to_offset; /* bar offset to use */
+ u32 to_size; /* to VK buffer size */
+ u32 wr; /* write offset shadow */
+ u32 from_offset; /* bar offset to use */
+ u32 from_size; /* from VK buffer size */
+ u32 rd; /* read offset shadow */
+ pid_t pid;
+ bool irq_enabled;
+ bool is_opened; /* tracks tty open/close */
+};
+
+/* VK device max power states: supports 3 (full, reduced, and low) */
+#define MAX_OPP 3
+#define MAX_CARD_INFO_TAG_SIZE 64
+
+struct bcm_vk_card_info {
+ u32 version;
+ char os_tag[MAX_CARD_INFO_TAG_SIZE];
+ char cmpt_tag[MAX_CARD_INFO_TAG_SIZE];
+ u32 cpu_freq_mhz;
+ u32 cpu_scale[MAX_OPP];
+ u32 ddr_freq_mhz;
+ u32 ddr_size_MB;
+ u32 video_core_freq_mhz;
+};
+
+/* DAUTH related info */
+struct bcm_vk_dauth_key {
+ char store[VK_BAR1_DAUTH_STORE_SIZE];
+ char valid[VK_BAR1_DAUTH_VALID_SIZE];
+};
+
+struct bcm_vk_dauth_info {
+ struct bcm_vk_dauth_key keys[VK_BAR1_DAUTH_MAX];
+};
+
+/*
+ * Control structure for logging messages from the card. This
+ * buffer is for log messages that come from the VK card
+ */
+struct bcm_vk_peer_log {
+ u32 rd_idx;
+ u32 wr_idx;
+ u32 buf_size;
+ u32 mask;
+ char data[0];
+};
+
+/* max buf size allowed */
+#define BCM_VK_PEER_LOG_BUF_MAX SZ_16K
+/* max size per line of peer log */
+#define BCM_VK_PEER_LOG_LINE_MAX 256
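/*
 * Editor's note - illustrative sketch only, not part of this patch: the
 * peer log is a power-of-two ring, so mask == buf_size - 1 and an index
 * advances as (idx + 1) & mask. bcm_vk_dump_peer_log() in bcm_vk_dev.c
 * relies on exactly this arithmetic when draining rd_idx toward wr_idx.
 */
static inline u32 bcm_vk_example_ring_advance(u32 idx, u32 mask)
{
	return (idx + 1) & mask;
}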
+
+/*
+ * single entry for processing type + utilization
+ */
+#define BCM_VK_PROC_TYPE_TAG_LEN 8
+struct bcm_vk_proc_mon_entry_t {
+ char tag[BCM_VK_PROC_TYPE_TAG_LEN];
+ u32 used;
+ u32 max; /**< max capacity */
+};
+
+/**
+ * Structure for run time utilization
+ */
+#define BCM_VK_PROC_MON_MAX 8 /* max entries supported */
+struct bcm_vk_proc_mon_info {
+ u32 num; /**< no of entries */
+ u32 entry_size; /**< per entry size */
+ struct bcm_vk_proc_mon_entry_t entries[BCM_VK_PROC_MON_MAX];
+};
+
+struct bcm_vk_hb_ctrl {
+ struct timer_list timer;
+ u32 last_uptime;
+ u32 lost_cnt;
+};
+
+struct bcm_vk_alert {
+ u16 flags;
+ u16 notfs;
+};
+
+/* some alert counters that the driver will keep track of */
+struct bcm_vk_alert_cnts {
+ u16 ecc;
+ u16 uecc;
+};
+
+struct bcm_vk {
+ struct pci_dev *pdev;
+ void __iomem *bar[MAX_BAR];
+ int num_irqs;
+
+ struct bcm_vk_card_info card_info;
+ struct bcm_vk_proc_mon_info proc_mon_info;
+ struct bcm_vk_dauth_info dauth_info;
+
+ /* mutex to protect the ioctls */
+ struct mutex mutex;
+ struct miscdevice miscdev;
+ int devid; /* dev id allocated */
+
+#ifdef CONFIG_BCM_VK_TTY
+ struct tty_driver *tty_drv;
+ struct timer_list serial_timer;
+ struct bcm_vk_tty tty[BCM_VK_NUM_TTY];
+ struct workqueue_struct *tty_wq_thread;
+ struct work_struct tty_wq_work;
+#endif
+
+ /* Reference-counting to handle file operations */
+ struct kref kref;
+
+ spinlock_t msg_id_lock; /* Spinlock for msg_id */
+ u16 msg_id;
+ DECLARE_BITMAP(bmap, VK_MSG_ID_BITMAP_SIZE);
+ spinlock_t ctx_lock; /* Spinlock for component context */
+ struct bcm_vk_ctx ctx[VK_CMPT_CTX_MAX];
+ struct bcm_vk_ht_entry pid_ht[VK_PID_HT_SZ];
+ pid_t reset_pid; /* process that issued reset */
+
+ atomic_t msgq_inited; /* indicate if info has been synced with vk */
+ struct bcm_vk_msg_chan to_v_msg_chan;
+ struct bcm_vk_msg_chan to_h_msg_chan;
+
+ struct workqueue_struct *wq_thread;
+ struct work_struct wq_work; /* work queue for deferred job */
+ unsigned long wq_offload[1]; /* various flags on wq requested */
+ void *tdma_vaddr; /* test dma segment virtual addr */
+ dma_addr_t tdma_addr; /* test dma segment bus addr */
+
+ struct notifier_block panic_nb;
+ u32 ib_sgl_size; /* size allocated for inband sgl insertion */
+
+ /* heart beat mechanism control structure */
+ struct bcm_vk_hb_ctrl hb_ctrl;
+ /* house-keeping variable of error logs */
+ spinlock_t host_alert_lock; /* protection to access host_alert struct */
+ struct bcm_vk_alert host_alert;
+ struct bcm_vk_alert peer_alert; /* bits set by the card */
+ struct bcm_vk_alert_cnts alert_cnts;
+
+ /* offset of the peer log control in BAR2 */
+ u32 peerlog_off;
+ struct bcm_vk_peer_log peerlog_info; /* record of peer log info */
+ /* offset of processing monitoring info in BAR2 */
+ u32 proc_mon_off;
+};
+
+/* wq offload work items bits definitions */
+enum bcm_vk_wq_offload_flags {
+ BCM_VK_WQ_DWNLD_PEND = 0,
+ BCM_VK_WQ_DWNLD_AUTO = 1,
+ BCM_VK_WQ_NOTF_PEND = 2,
+};
+
+/* a macro to get an individual field with mask and shift */
+#define BCM_VK_EXTRACT_FIELD(_field, _reg, _mask, _shift) \
+ (_field = (((_reg) >> (_shift)) & (_mask)))
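/*
 * Editor's note - illustrative usage only, not part of this patch: the
 * macro above assigns a masked, shifted field. A hypothetical helper
 * pulling the CPU temperature byte out of BAR_CARD_TEMPERATURE:
 */
static inline u8 bcm_vk_example_cpu_temp(u32 temp_reg)
{
	u8 cpu_temp;

	BCM_VK_EXTRACT_FIELD(cpu_temp, temp_reg,
			     BCM_VK_TEMP_FIELD_MASK,
			     BCM_VK_CPU_TEMP_SHIFT);
	return cpu_temp;	/* bits 7:0 of the register */
}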
+
+struct bcm_vk_entry {
+ const u32 mask;
+ const u32 exp_val;
+ const char *str;
+};
+
+/* alerts that could be generated from peer */
+#define BCM_VK_PEER_ERR_NUM 12
+extern struct bcm_vk_entry const bcm_vk_peer_err[BCM_VK_PEER_ERR_NUM];
+/* alerts detected by the host */
+#define BCM_VK_HOST_ERR_NUM 3
+extern struct bcm_vk_entry const bcm_vk_host_err[BCM_VK_HOST_ERR_NUM];
+
+/*
+ * check if the PCIe interface is down based on a read value. Use it
+ * only when it is certain that val should never legitimately be all ones.
+ */
+#define BCM_VK_INTF_IS_DOWN(val) ((val) == 0xffffffff)
+
+static inline u32 vkread32(struct bcm_vk *vk, enum pci_barno bar, u64 offset)
+{
+ return readl(vk->bar[bar] + offset);
+}
+
+static inline void vkwrite32(struct bcm_vk *vk,
+ u32 value,
+ enum pci_barno bar,
+ u64 offset)
+{
+ writel(value, vk->bar[bar] + offset);
+}
+
+static inline u8 vkread8(struct bcm_vk *vk, enum pci_barno bar, u64 offset)
+{
+ return readb(vk->bar[bar] + offset);
+}
+
+static inline void vkwrite8(struct bcm_vk *vk,
+ u8 value,
+ enum pci_barno bar,
+ u64 offset)
+{
+ writeb(value, vk->bar[bar] + offset);
+}
+
+static inline bool bcm_vk_msgq_marker_valid(struct bcm_vk *vk)
+{
+ u32 rdy_marker = 0;
+ u32 fw_status;
+
+ fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS);
+
+ if ((fw_status & VK_FWSTS_READY) == VK_FWSTS_READY)
+ rdy_marker = vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY);
+
+ return (rdy_marker == VK_BAR1_MSGQ_RDY_MARKER);
+}
+
+int bcm_vk_open(struct inode *inode, struct file *p_file);
+ssize_t bcm_vk_read(struct file *p_file, char __user *buf, size_t count,
+ loff_t *f_pos);
+ssize_t bcm_vk_write(struct file *p_file, const char __user *buf,
+ size_t count, loff_t *f_pos);
+__poll_t bcm_vk_poll(struct file *p_file, struct poll_table_struct *wait);
+int bcm_vk_release(struct inode *inode, struct file *p_file);
+void bcm_vk_release_data(struct kref *kref);
+irqreturn_t bcm_vk_msgq_irqhandler(int irq, void *dev_id);
+irqreturn_t bcm_vk_notf_irqhandler(int irq, void *dev_id);
+irqreturn_t bcm_vk_tty_irqhandler(int irq, void *dev_id);
+int bcm_vk_msg_init(struct bcm_vk *vk);
+void bcm_vk_msg_remove(struct bcm_vk *vk);
+void bcm_vk_drain_msg_on_reset(struct bcm_vk *vk);
+int bcm_vk_sync_msgq(struct bcm_vk *vk, bool force_sync);
+void bcm_vk_blk_drv_access(struct bcm_vk *vk);
+s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk);
+int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type,
+ const pid_t pid, const u32 q_num);
+void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val);
+int bcm_vk_auto_load_all_images(struct bcm_vk *vk);
+void bcm_vk_hb_init(struct bcm_vk *vk);
+void bcm_vk_hb_deinit(struct bcm_vk *vk);
+void bcm_vk_handle_notf(struct bcm_vk *vk);
+bool bcm_vk_drv_access_ok(struct bcm_vk *vk);
+void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask);
+
+#ifdef CONFIG_BCM_VK_TTY
+int bcm_vk_tty_init(struct bcm_vk *vk, char *name);
+void bcm_vk_tty_exit(struct bcm_vk *vk);
+void bcm_vk_tty_terminate_tty_user(struct bcm_vk *vk);
+void bcm_vk_tty_wq_exit(struct bcm_vk *vk);
+
+static inline void bcm_vk_tty_set_irq_enabled(struct bcm_vk *vk, int index)
+{
+ vk->tty[index].irq_enabled = true;
+}
+#else
+static inline int bcm_vk_tty_init(struct bcm_vk *vk, char *name)
+{
+ return 0;
+}
+
+static inline void bcm_vk_tty_exit(struct bcm_vk *vk)
+{
+}
+
+static inline void bcm_vk_tty_terminate_tty_user(struct bcm_vk *vk)
+{
+}
+
+static inline void bcm_vk_tty_wq_exit(struct bcm_vk *vk)
+{
+}
+
+static inline void bcm_vk_tty_set_irq_enabled(struct bcm_vk *vk, int index)
+{
+}
+#endif /* CONFIG_BCM_VK_TTY */
+
+#endif
diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c
new file mode 100644
index 000000000000..6bfea3210389
--- /dev/null
+++ b/drivers/misc/bcm-vk/bcm_vk_dev.c
@@ -0,0 +1,1652 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <uapi/linux/misc/bcm_vk.h>
+
+#include "bcm_vk.h"
+
+#define PCI_DEVICE_ID_VALKYRIE 0x5e87
+#define PCI_DEVICE_ID_VIPER 0x5e88
+
+static DEFINE_IDA(bcm_vk_ida);
+
+enum soc_idx {
+ VALKYRIE_A0 = 0,
+ VALKYRIE_B0,
+ VIPER,
+ VK_IDX_INVALID
+};
+
+enum img_idx {
+ IMG_PRI = 0,
+ IMG_SEC,
+ IMG_PER_TYPE_MAX
+};
+
+struct load_image_entry {
+ const u32 image_type;
+ const char *image_name[IMG_PER_TYPE_MAX];
+};
+
+#define NUM_BOOT_STAGES 2
+/* default firmware images names */
+static const struct load_image_entry image_tab[][NUM_BOOT_STAGES] = {
+ [VALKYRIE_A0] = {
+ {VK_IMAGE_TYPE_BOOT1, {"vk_a0-boot1.bin", "vk-boot1.bin"}},
+ {VK_IMAGE_TYPE_BOOT2, {"vk_a0-boot2.bin", "vk-boot2.bin"}}
+ },
+ [VALKYRIE_B0] = {
+ {VK_IMAGE_TYPE_BOOT1, {"vk_b0-boot1.bin", "vk-boot1.bin"}},
+ {VK_IMAGE_TYPE_BOOT2, {"vk_b0-boot2.bin", "vk-boot2.bin"}}
+ },
+
+ [VIPER] = {
+ {VK_IMAGE_TYPE_BOOT1, {"vp-boot1.bin", ""}},
+ {VK_IMAGE_TYPE_BOOT2, {"vp-boot2.bin", ""}}
+ },
+};
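/*
 * Editor's note - illustrative lookup only, not part of this patch: for
 * a B0 Valkyrie, stage 0 (boot1) resolves to the chip-specific image
 * first; get_load_fw_name() below falls back to the IMG_SEC entry when
 * the primary file is absent.
 */
static inline const char *bcm_vk_example_boot1_name(void)
{
	/* "vk_b0-boot1.bin", with "vk-boot1.bin" as the fallback */
	return image_tab[VALKYRIE_B0][0].image_name[IMG_PRI];
}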
+
+/* Location of memory base addresses of interest in BAR1 */
+/* Load Boot1 to start of ITCM */
+#define BAR1_CODEPUSH_BASE_BOOT1 0x100000
+
+/* Allow minimum 1s for Load Image timeout responses */
+#define LOAD_IMAGE_TIMEOUT_MS (1 * MSEC_PER_SEC)
+
+/* Image startup timeouts */
+#define BOOT1_STARTUP_TIMEOUT_MS (5 * MSEC_PER_SEC)
+#define BOOT2_STARTUP_TIMEOUT_MS (10 * MSEC_PER_SEC)
+
+/* 1ms wait for checking the transfer complete status */
+#define TXFR_COMPLETE_TIMEOUT_MS 1
+
+/* MSIX usages */
+#define VK_MSIX_MSGQ_MAX 3
+#define VK_MSIX_NOTF_MAX 1
+#define VK_MSIX_TTY_MAX BCM_VK_NUM_TTY
+#define VK_MSIX_IRQ_MAX (VK_MSIX_MSGQ_MAX + VK_MSIX_NOTF_MAX + \
+ VK_MSIX_TTY_MAX)
+#define VK_MSIX_IRQ_MIN_REQ (VK_MSIX_MSGQ_MAX + VK_MSIX_NOTF_MAX)
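/*
 * Editor's note, worked out under the assumption that CONFIG_BCM_VK_TTY
 * is set (so BCM_VK_NUM_TTY == 2): VK_MSIX_IRQ_MAX = 3 msgq + 1 notf +
 * 2 tty = 6 vectors, while VK_MSIX_IRQ_MIN_REQ = 4 is the floor the
 * driver can operate with.
 */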
+
+/* Number of bits set in DMA mask */
+#define BCM_VK_DMA_BITS 64
+
+/* Ucode boot wait time */
+#define BCM_VK_UCODE_BOOT_US (100 * USEC_PER_MSEC)
+/* 50% margin: (100 ms * 3) >> 1 = 150 ms */
+#define BCM_VK_UCODE_BOOT_MAX_US ((BCM_VK_UCODE_BOOT_US * 3) >> 1)
+
+/* deinit time for the card os after receiving doorbell */
+#define BCM_VK_DEINIT_TIME_MS (2 * MSEC_PER_SEC)
+
+/*
+ * module parameters
+ */
+static bool auto_load = true;
+module_param(auto_load, bool, 0444);
+MODULE_PARM_DESC(auto_load,
+ "Load images automatically at PCIe probe time.\n");
+static uint nr_scratch_pages = VK_BAR1_SCRATCH_DEF_NR_PAGES;
+module_param(nr_scratch_pages, uint, 0444);
+MODULE_PARM_DESC(nr_scratch_pages,
+ "Number of pre allocated DMAable coherent pages.\n");
+static uint nr_ib_sgl_blk = BCM_VK_DEF_IB_SGL_BLK_LEN;
+module_param(nr_ib_sgl_blk, uint, 0444);
+MODULE_PARM_DESC(nr_ib_sgl_blk,
+ "Number of in-band msg blks for short SGL.\n");
+
+/*
+ * alerts that could be generated from peer
+ */
+const struct bcm_vk_entry bcm_vk_peer_err[BCM_VK_PEER_ERR_NUM] = {
+ {ERR_LOG_UECC, ERR_LOG_UECC, "uecc"},
+ {ERR_LOG_SSIM_BUSY, ERR_LOG_SSIM_BUSY, "ssim_busy"},
+ {ERR_LOG_AFBC_BUSY, ERR_LOG_AFBC_BUSY, "afbc_busy"},
+ {ERR_LOG_HIGH_TEMP_ERR, ERR_LOG_HIGH_TEMP_ERR, "high_temp"},
+ {ERR_LOG_WDOG_TIMEOUT, ERR_LOG_WDOG_TIMEOUT, "wdog_timeout"},
+ {ERR_LOG_SYS_FAULT, ERR_LOG_SYS_FAULT, "sys_fault"},
+ {ERR_LOG_RAMDUMP, ERR_LOG_RAMDUMP, "ramdump"},
+ {ERR_LOG_COP_WDOG_TIMEOUT, ERR_LOG_COP_WDOG_TIMEOUT,
+ "cop_wdog_timeout"},
+ {ERR_LOG_MEM_ALLOC_FAIL, ERR_LOG_MEM_ALLOC_FAIL, "malloc_fail warn"},
+ {ERR_LOG_LOW_TEMP_WARN, ERR_LOG_LOW_TEMP_WARN, "low_temp warn"},
+ {ERR_LOG_ECC, ERR_LOG_ECC, "ecc"},
+ {ERR_LOG_IPC_DWN, ERR_LOG_IPC_DWN, "ipc_down"},
+};
+
+/* alerts detected by the host */
+const struct bcm_vk_entry bcm_vk_host_err[BCM_VK_HOST_ERR_NUM] = {
+ {ERR_LOG_HOST_PCIE_DWN, ERR_LOG_HOST_PCIE_DWN, "PCIe_down"},
+ {ERR_LOG_HOST_HB_FAIL, ERR_LOG_HOST_HB_FAIL, "hb_fail"},
+ {ERR_LOG_HOST_INTF_V_FAIL, ERR_LOG_HOST_INTF_V_FAIL, "intf_ver_fail"},
+};
+
+irqreturn_t bcm_vk_notf_irqhandler(int irq, void *dev_id)
+{
+ struct bcm_vk *vk = dev_id;
+
+ if (!bcm_vk_drv_access_ok(vk)) {
+ dev_err(&vk->pdev->dev,
+ "Interrupt %d received when msgq not inited\n", irq);
+ goto skip_schedule_work;
+ }
+
+ /* if notification is not pending, set bit and schedule work */
+ if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
+ queue_work(vk->wq_thread, &vk->wq_work);
+
+skip_schedule_work:
+ return IRQ_HANDLED;
+}
+
+static int bcm_vk_intf_ver_chk(struct bcm_vk *vk)
+{
+ struct device *dev = &vk->pdev->dev;
+ u32 reg;
+ u16 major, minor;
+ int ret = 0;
+
+ /* read interface register */
+ reg = vkread32(vk, BAR_0, BAR_INTF_VER);
+ major = (reg >> BAR_INTF_VER_MAJOR_SHIFT) & BAR_INTF_VER_MASK;
+ minor = reg & BAR_INTF_VER_MASK;
+
+ /*
+ * if the major number is 0, it is a pre-release and is allowed
+ * to continue; otherwise, check the versions accordingly
+ */
+ if (!major) {
+ dev_warn(dev, "Pre-release major.minor=%d.%d - drv %d.%d\n",
+ major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
+ } else if (major != SEMANTIC_MAJOR) {
+ dev_err(dev,
+ "Intf major.minor=%d.%d rejected - drv %d.%d\n",
+ major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
+ bcm_vk_set_host_alert(vk, ERR_LOG_HOST_INTF_V_FAIL);
+ ret = -EPFNOSUPPORT;
+ } else {
+ dev_dbg(dev,
+ "Intf major.minor=%d.%d passed - drv %d.%d\n",
+ major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
+ }
+ return ret;
+}
+
+static void bcm_vk_log_notf(struct bcm_vk *vk,
+ struct bcm_vk_alert *alert,
+ struct bcm_vk_entry const *entry_tab,
+ const u32 table_size)
+{
+ u32 i;
+ u32 masked_val, latched_val;
+ struct bcm_vk_entry const *entry;
+ u32 reg;
+ u16 ecc_mem_err, uecc_mem_err;
+ struct device *dev = &vk->pdev->dev;
+
+ for (i = 0; i < table_size; i++) {
+ entry = &entry_tab[i];
+ masked_val = entry->mask & alert->notfs;
+ latched_val = entry->mask & alert->flags;
+
+ if (masked_val == ERR_LOG_UECC) {
+ /*
+ * if the count differs from the stored value and
+ * is greater than the threshold, log it.
+ */
+ reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM);
+ BCM_VK_EXTRACT_FIELD(uecc_mem_err, reg,
+ BCM_VK_MEM_ERR_FIELD_MASK,
+ BCM_VK_UECC_MEM_ERR_SHIFT);
+ if ((uecc_mem_err != vk->alert_cnts.uecc) &&
+ (uecc_mem_err >= BCM_VK_UECC_THRESHOLD))
+ dev_info(dev,
+ "ALERT! %s.%d uecc RAISED - ErrCnt %d\n",
+ DRV_MODULE_NAME, vk->devid,
+ uecc_mem_err);
+ vk->alert_cnts.uecc = uecc_mem_err;
+ } else if (masked_val == ERR_LOG_ECC) {
+ reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM);
+ BCM_VK_EXTRACT_FIELD(ecc_mem_err, reg,
+ BCM_VK_MEM_ERR_FIELD_MASK,
+ BCM_VK_ECC_MEM_ERR_SHIFT);
+ if ((ecc_mem_err != vk->alert_cnts.ecc) &&
+ (ecc_mem_err >= BCM_VK_ECC_THRESHOLD))
+ dev_info(dev, "ALERT! %s.%d ecc RAISED - ErrCnt %d\n",
+ DRV_MODULE_NAME, vk->devid,
+ ecc_mem_err);
+ vk->alert_cnts.ecc = ecc_mem_err;
+ } else if (masked_val != latched_val) {
+ /* print a log as info */
+ dev_info(dev, "ALERT! %s.%d %s %s\n",
+ DRV_MODULE_NAME, vk->devid, entry->str,
+ masked_val ? "RAISED" : "CLEARED");
+ }
+ }
+}
+
+static void bcm_vk_dump_peer_log(struct bcm_vk *vk)
+{
+ struct bcm_vk_peer_log log;
+ struct bcm_vk_peer_log *log_info = &vk->peerlog_info;
+ char loc_buf[BCM_VK_PEER_LOG_LINE_MAX];
+ int cnt;
+ struct device *dev = &vk->pdev->dev;
+ unsigned int data_offset;
+
+ memcpy_fromio(&log, vk->bar[BAR_2] + vk->peerlog_off, sizeof(log));
+
+ dev_dbg(dev, "Peer PANIC: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
+ log.buf_size, log.mask, log.rd_idx, log.wr_idx);
+
+ if (!log_info->buf_size) {
+ dev_err(dev, "Peer log dump disabled - skipped!\n");
+ return;
+ }
+
+ /* perform range checking for rd/wr idx */
+ if ((log.rd_idx > log_info->mask) ||
+ (log.wr_idx > log_info->mask) ||
+ (log.buf_size != log_info->buf_size) ||
+ (log.mask != log_info->mask)) {
+ dev_err(dev,
+ "Corrupted Ptrs: Size 0x%x(0x%x) Mask 0x%x(0x%x) [Rd Wr] = [%d %d], skip log dump.\n",
+ log_info->buf_size, log.buf_size,
+ log_info->mask, log.mask,
+ log.rd_idx, log.wr_idx);
+ return;
+ }
+
+ cnt = 0;
+ data_offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log);
+ loc_buf[BCM_VK_PEER_LOG_LINE_MAX - 1] = '\0';
+ while (log.rd_idx != log.wr_idx) {
+ loc_buf[cnt] = vkread8(vk, BAR_2, data_offset + log.rd_idx);
+
+ if ((loc_buf[cnt] == '\0') ||
+ (cnt == (BCM_VK_PEER_LOG_LINE_MAX - 1))) {
+ dev_err(dev, "%s", loc_buf);
+ cnt = 0;
+ } else {
+ cnt++;
+ }
+ log.rd_idx = (log.rd_idx + 1) & log.mask;
+ }
+ /* update rd idx at the end */
+ vkwrite32(vk, log.rd_idx, BAR_2,
+ vk->peerlog_off + offsetof(struct bcm_vk_peer_log, rd_idx));
+}
+
+void bcm_vk_handle_notf(struct bcm_vk *vk)
+{
+ u32 reg;
+ struct bcm_vk_alert alert;
+ bool intf_down;
+ unsigned long flags;
+
+ /* handle peer alerts and then locally detected ones */
+ reg = vkread32(vk, BAR_0, BAR_CARD_ERR_LOG);
+ intf_down = BCM_VK_INTF_IS_DOWN(reg);
+ if (!intf_down) {
+ vk->peer_alert.notfs = reg;
+ bcm_vk_log_notf(vk, &vk->peer_alert, bcm_vk_peer_err,
+ ARRAY_SIZE(bcm_vk_peer_err));
+ vk->peer_alert.flags = vk->peer_alert.notfs;
+ } else {
+ /* turn off access */
+ bcm_vk_blk_drv_access(vk);
+ }
+
+ /* check and make copy of alert with lock and then free lock */
+ spin_lock_irqsave(&vk->host_alert_lock, flags);
+ if (intf_down)
+ vk->host_alert.notfs |= ERR_LOG_HOST_PCIE_DWN;
+
+ alert = vk->host_alert;
+ vk->host_alert.flags = vk->host_alert.notfs;
+ spin_unlock_irqrestore(&vk->host_alert_lock, flags);
+
+ /* call display with copy */
+ bcm_vk_log_notf(vk, &alert, bcm_vk_host_err,
+ ARRAY_SIZE(bcm_vk_host_err));
+
+ /*
+ * If it is a sys fault or heartbeat timeout, we would like to extract
+ * the log msg from the card so that we know what the last fault was
+ */
+ if (!intf_down &&
+ ((vk->host_alert.flags & ERR_LOG_HOST_HB_FAIL) ||
+ (vk->peer_alert.flags & ERR_LOG_SYS_FAULT)))
+ bcm_vk_dump_peer_log(vk);
+}
+
+static inline int bcm_vk_wait(struct bcm_vk *vk, enum pci_barno bar,
+ u64 offset, u32 mask, u32 value,
+ unsigned long timeout_ms)
+{
+ struct device *dev = &vk->pdev->dev;
+ unsigned long start_time;
+ unsigned long timeout;
+ u32 rd_val, boot_status;
+
+ start_time = jiffies;
+ timeout = start_time + msecs_to_jiffies(timeout_ms);
+
+ do {
+ rd_val = vkread32(vk, bar, offset);
+ dev_dbg(dev, "BAR%d Offset=0x%llx: 0x%x\n",
+ bar, offset, rd_val);
+
+ /* check for any boot err condition */
+ boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
+ if (boot_status & BOOT_ERR_MASK) {
+ dev_err(dev, "Boot Err 0x%x, progress 0x%x after %d ms\n",
+ (boot_status & BOOT_ERR_MASK) >> BOOT_ERR_SHIFT,
+ boot_status & BOOT_PROG_MASK,
+ jiffies_to_msecs(jiffies - start_time));
+ return -EFAULT;
+ }
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ cpu_relax();
+ cond_resched();
+ } while ((rd_val & mask) != value);
+
+ return 0;
+}
+
+static void bcm_vk_get_card_info(struct bcm_vk *vk)
+{
+ struct device *dev = &vk->pdev->dev;
+ u32 offset;
+ int i;
+ u8 *dst;
+ struct bcm_vk_card_info *info = &vk->card_info;
+
+ /* first read the offset from spare register */
+ offset = vkread32(vk, BAR_0, BAR_CARD_STATIC_INFO);
+ offset &= (pci_resource_len(vk->pdev, BAR_2 * 2) - 1);
+
+ /* based on the offset, read info to internal card info structure */
+ dst = (u8 *)info;
+ for (i = 0; i < sizeof(*info); i++)
+ *dst++ = vkread8(vk, BAR_2, offset++);
+
+#define CARD_INFO_LOG_FMT "version : %x\n" \
+ "os_tag : %s\n" \
+ "cmpt_tag : %s\n" \
+ "cpu_freq : %d MHz\n" \
+ "cpu_scale : %d full, %d lowest\n" \
+ "ddr_freq : %d MHz\n" \
+ "ddr_size : %d MB\n" \
+ "video_freq: %d MHz\n"
+ dev_dbg(dev, CARD_INFO_LOG_FMT, info->version, info->os_tag,
+ info->cmpt_tag, info->cpu_freq_mhz, info->cpu_scale[0],
+ info->cpu_scale[MAX_OPP - 1], info->ddr_freq_mhz,
+ info->ddr_size_MB, info->video_core_freq_mhz);
+
+ /*
+ * get the peer log pointer; only the offset is needed. Also keep a
+ * record of the log buffer information, which is used for checking
+ * before a dump, in case the BAR2 memory has been corrupted.
+ */
+ vk->peerlog_off = offset;
+ memcpy_fromio(&vk->peerlog_info, vk->bar[BAR_2] + vk->peerlog_off,
+ sizeof(vk->peerlog_info));
+
+ /*
+ * Do range checking; if out of bounds, the record is zeroed,
+ * which guarantees that nothing will be dumped. In other words,
+ * the peer dump is disabled.
+ */
+ if ((vk->peerlog_info.buf_size > BCM_VK_PEER_LOG_BUF_MAX) ||
+ (vk->peerlog_info.mask != (vk->peerlog_info.buf_size - 1)) ||
+ (vk->peerlog_info.rd_idx > vk->peerlog_info.mask) ||
+ (vk->peerlog_info.wr_idx > vk->peerlog_info.mask)) {
+ dev_err(dev, "Peer log disabled - range error: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
+ vk->peerlog_info.buf_size,
+ vk->peerlog_info.mask,
+ vk->peerlog_info.rd_idx,
+ vk->peerlog_info.wr_idx);
+ memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info));
+ } else {
+ dev_dbg(dev, "Peer log: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
+ vk->peerlog_info.buf_size,
+ vk->peerlog_info.mask,
+ vk->peerlog_info.rd_idx,
+ vk->peerlog_info.wr_idx);
+ }
+}
+
+static void bcm_vk_get_proc_mon_info(struct bcm_vk *vk)
+{
+ struct device *dev = &vk->pdev->dev;
+ struct bcm_vk_proc_mon_info *mon = &vk->proc_mon_info;
+ u32 num, entry_size, offset, buf_size;
+ u8 *dst;
+
+ /* calculate offset which is based on peerlog offset */
+ buf_size = vkread32(vk, BAR_2,
+ vk->peerlog_off
+ + offsetof(struct bcm_vk_peer_log, buf_size));
+ offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log)
+ + buf_size;
+
+ /* first read the num and entry size */
+ num = vkread32(vk, BAR_2, offset);
+ entry_size = vkread32(vk, BAR_2, offset + sizeof(num));
+
+ /* check for max allowed */
+ if (num > BCM_VK_PROC_MON_MAX) {
+ dev_err(dev, "Processing monitoring entry %d exceeds max %d\n",
+ num, BCM_VK_PROC_MON_MAX);
+ return;
+ }
+ mon->num = num;
+ mon->entry_size = entry_size;
+
+ vk->proc_mon_off = offset;
+
+ /* read it once that will capture those static info */
+ dst = (u8 *)&mon->entries[0];
+ offset += sizeof(num) + sizeof(entry_size);
+ memcpy_fromio(dst, vk->bar[BAR_2] + offset, num * entry_size);
+}
+
+static int bcm_vk_sync_card_info(struct bcm_vk *vk)
+{
+ u32 rdy_marker = vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY);
+
+ /* check for marker, but allow diags mode to skip sync */
+ if (!bcm_vk_msgq_marker_valid(vk))
+ return (rdy_marker == VK_BAR1_DIAG_RDY_MARKER ? 0 : -EINVAL);
+
+ /*
+ * Write down scratch addr which is used for DMA. For
+ * signed part, BAR1 is accessible only after boot2 has come
+ * up
+ */
+ if (vk->tdma_addr) {
+ vkwrite32(vk, (u64)vk->tdma_addr >> 32, BAR_1,
+ VK_BAR1_SCRATCH_OFF_HI);
+ vkwrite32(vk, (u32)vk->tdma_addr, BAR_1,
+ VK_BAR1_SCRATCH_OFF_LO);
+ vkwrite32(vk, nr_scratch_pages * PAGE_SIZE, BAR_1,
+ VK_BAR1_SCRATCH_SZ_ADDR);
+ }
+
+ /* get static card info, only need to read once */
+ bcm_vk_get_card_info(vk);
+
+ /* get the proc mon info once */
+ bcm_vk_get_proc_mon_info(vk);
+
+ return 0;
+}
+
+void bcm_vk_blk_drv_access(struct bcm_vk *vk)
+{
+ int i;
+
+ /*
+ * kill all the apps except for the process that is resetting.
+ * If not called during reset, reset_pid will be 0, and all will be
+ * killed.
+ */
+ spin_lock(&vk->ctx_lock);
+
+ /* set msgq_inited to 0 so that all rd/wr will be blocked */
+ atomic_set(&vk->msgq_inited, 0);
+
+ for (i = 0; i < VK_PID_HT_SZ; i++) {
+ struct bcm_vk_ctx *ctx;
+
+ list_for_each_entry(ctx, &vk->pid_ht[i].head, node) {
+ if (ctx->pid != vk->reset_pid) {
+ dev_dbg(&vk->pdev->dev,
+ "Send kill signal to pid %d\n",
+ ctx->pid);
+ kill_pid(find_vpid(ctx->pid), SIGKILL, 1);
+ }
+ }
+ }
+ bcm_vk_tty_terminate_tty_user(vk);
+ spin_unlock(&vk->ctx_lock);
+}
+
+static void bcm_vk_buf_notify(struct bcm_vk *vk, void *bufp,
+ dma_addr_t host_buf_addr, u32 buf_size)
+{
+ /* update the dma address to the card */
+ vkwrite32(vk, (u64)host_buf_addr >> 32, BAR_1,
+ VK_BAR1_DMA_BUF_OFF_HI);
+ vkwrite32(vk, (u32)host_buf_addr, BAR_1,
+ VK_BAR1_DMA_BUF_OFF_LO);
+ vkwrite32(vk, buf_size, BAR_1, VK_BAR1_DMA_BUF_SZ);
+}
+
+static int bcm_vk_load_image_by_type(struct bcm_vk *vk, u32 load_type,
+ const char *filename)
+{
+ struct device *dev = &vk->pdev->dev;
+ const struct firmware *fw = NULL;
+ void *bufp = NULL;
+ size_t max_buf, offset;
+ int ret;
+ u64 offset_codepush;
+ u32 codepush;
+ u32 value;
+ dma_addr_t boot_dma_addr;
+ bool is_stdalone;
+
+ if (load_type == VK_IMAGE_TYPE_BOOT1) {
+ /*
+ * After POR, enable VK soft BOOTSRC so the bootrom does not clear
+ * the pushed image (the TCM memories).
+ */
+ value = vkread32(vk, BAR_0, BAR_BOOTSRC_SELECT);
+ value |= BOOTSRC_SOFT_ENABLE;
+ vkwrite32(vk, value, BAR_0, BAR_BOOTSRC_SELECT);
+
+ codepush = CODEPUSH_BOOTSTART + CODEPUSH_BOOT1_ENTRY;
+ offset_codepush = BAR_CODEPUSH_SBL;
+
+ /* Write a 1 to request SRAM open bit */
+ vkwrite32(vk, CODEPUSH_BOOTSTART, BAR_0, offset_codepush);
+
+ /* Wait for VK to respond */
+ ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, SRAM_OPEN,
+ SRAM_OPEN, LOAD_IMAGE_TIMEOUT_MS);
+ if (ret < 0) {
+ dev_err(dev, "boot1 wait SRAM err - ret(%d)\n", ret);
+ goto err_buf_out;
+ }
+
+ max_buf = SZ_256K;
+ bufp = dma_alloc_coherent(dev,
+ max_buf,
+ &boot_dma_addr, GFP_KERNEL);
+ if (!bufp) {
+ dev_err(dev, "Error allocating 0x%zx\n", max_buf);
+ ret = -ENOMEM;
+ goto err_buf_out;
+ }
+ } else if (load_type == VK_IMAGE_TYPE_BOOT2) {
+ codepush = CODEPUSH_BOOT2_ENTRY;
+ offset_codepush = BAR_CODEPUSH_SBI;
+
+ /* Wait for VK to respond */
+ ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, DDR_OPEN,
+ DDR_OPEN, LOAD_IMAGE_TIMEOUT_MS);
+ if (ret < 0) {
+ dev_err(dev, "boot2 wait DDR open error - ret(%d)\n",
+ ret);
+ goto err_buf_out;
+ }
+
+ max_buf = SZ_4M;
+ bufp = dma_alloc_coherent(dev,
+ max_buf,
+ &boot_dma_addr, GFP_KERNEL);
+ if (!bufp) {
+ dev_err(dev, "Error allocating 0x%zx\n", max_buf);
+ ret = -ENOMEM;
+ goto err_buf_out;
+ }
+
+ bcm_vk_buf_notify(vk, bufp, boot_dma_addr, max_buf);
+ } else {
+ dev_err(dev, "Error invalid image type 0x%x\n", load_type);
+ ret = -EINVAL;
+ goto err_buf_out;
+ }
+
+ offset = 0;
+ ret = request_partial_firmware_into_buf(&fw, filename, dev,
+ bufp, max_buf, offset);
+ if (ret) {
+ dev_err(dev, "Error %d requesting firmware file: %s\n",
+ ret, filename);
+ goto err_firmware_out;
+ }
+ dev_dbg(dev, "size=0x%zx\n", fw->size);
+ if (load_type == VK_IMAGE_TYPE_BOOT1)
+ memcpy_toio(vk->bar[BAR_1] + BAR1_CODEPUSH_BASE_BOOT1,
+ bufp,
+ fw->size);
+
+ dev_dbg(dev, "Signaling 0x%x to 0x%llx\n", codepush, offset_codepush);
+ vkwrite32(vk, codepush, BAR_0, offset_codepush);
+
+ if (load_type == VK_IMAGE_TYPE_BOOT1) {
+ u32 boot_status;
+
+ /* wait until done */
+ ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS,
+ BOOT1_RUNNING,
+ BOOT1_RUNNING,
+ BOOT1_STARTUP_TIMEOUT_MS);
+
+ boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
+ is_stdalone = !BCM_VK_INTF_IS_DOWN(boot_status) &&
+ (boot_status & BOOT_STDALONE_RUNNING);
+ if (ret && !is_stdalone) {
+ dev_err(dev,
+ "Timeout %ld ms waiting for boot1 to come up - ret(%d)\n",
+ BOOT1_STARTUP_TIMEOUT_MS, ret);
+ goto err_firmware_out;
+ } else if (is_stdalone) {
+ u32 reg;
+
+ reg = vkread32(vk, BAR_0, BAR_BOOT1_STDALONE_PROGRESS);
+ if ((reg & BOOT1_STDALONE_PROGRESS_MASK) ==
+ BOOT1_STDALONE_SUCCESS) {
+ dev_info(dev, "Boot1 standalone success\n");
+ ret = 0;
+ } else {
+ dev_err(dev, "Timeout %ld ms - Boot1 standalone failure\n",
+ BOOT1_STARTUP_TIMEOUT_MS);
+ ret = -EINVAL;
+ goto err_firmware_out;
+ }
+ }
+ } else if (load_type == VK_IMAGE_TYPE_BOOT2) {
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(LOAD_IMAGE_TIMEOUT_MS);
+
+ /* Loop to send the image to VK in chunks of at most max_buf at a time */
+ do {
+ /*
+ * Check for an ack from the card. When the ack is
+ * received, all the data has been received by the card.
+ * Exit the loop after the ack is received.
+ */
+ ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS,
+ FW_LOADER_ACK_RCVD_ALL_DATA,
+ FW_LOADER_ACK_RCVD_ALL_DATA,
+ TXFR_COMPLETE_TIMEOUT_MS);
+ if (ret == 0) {
+ dev_dbg(dev, "Exit boot2 download\n");
+ break;
+ } else if (ret == -EFAULT) {
+ dev_err(dev, "Error detected during ACK waiting");
+ goto err_firmware_out;
+ }
+
+ /* exit the loop, if there is no response from card */
+ if (time_after(jiffies, timeout)) {
+ dev_err(dev, "Error. No reply from card\n");
+ ret = -ETIMEDOUT;
+ goto err_firmware_out;
+ }
+
+ /* Wait for VK to open BAR space to copy new data */
+ ret = bcm_vk_wait(vk, BAR_0, offset_codepush,
+ codepush, 0,
+ TXFR_COMPLETE_TIMEOUT_MS);
+ if (ret == 0) {
+ offset += max_buf;
+ ret = request_partial_firmware_into_buf
+ (&fw,
+ filename,
+ dev, bufp,
+ max_buf,
+ offset);
+ if (ret) {
+ dev_err(dev,
+ "Error %d requesting firmware file: %s offset: 0x%zx\n",
+ ret, filename, offset);
+ goto err_firmware_out;
+ }
+ dev_dbg(dev, "size=0x%zx\n", fw->size);
+ dev_dbg(dev, "Signaling 0x%x to 0x%llx\n",
+ codepush, offset_codepush);
+ vkwrite32(vk, codepush, BAR_0, offset_codepush);
+ /* reload timeout after every codepush */
+ timeout = jiffies +
+ msecs_to_jiffies(LOAD_IMAGE_TIMEOUT_MS);
+ } else if (ret == -EFAULT) {
+ dev_err(dev, "Error detected waiting for transfer\n");
+ goto err_firmware_out;
+ }
+ } while (1);
+
+ /* wait for fw status bits to indicate app ready */
+ ret = bcm_vk_wait(vk, BAR_0, VK_BAR_FWSTS,
+ VK_FWSTS_READY,
+ VK_FWSTS_READY,
+ BOOT2_STARTUP_TIMEOUT_MS);
+ if (ret < 0) {
+ dev_err(dev, "Boot2 not ready - ret(%d)\n", ret);
+ goto err_firmware_out;
+ }
+
+ is_stdalone = vkread32(vk, BAR_0, BAR_BOOT_STATUS) &
+ BOOT_STDALONE_RUNNING;
+ if (!is_stdalone) {
+ ret = bcm_vk_intf_ver_chk(vk);
+ if (ret) {
+ dev_err(dev, "failure in intf version check\n");
+ goto err_firmware_out;
+ }
+
+ /*
+ * Next, initialize Message Q if we are loading boot2.
+ * Do a force sync
+ */
+ ret = bcm_vk_sync_msgq(vk, true);
+ if (ret) {
+ dev_err(dev, "Boot2 Error reading comm msg Q info\n");
+ ret = -EIO;
+ goto err_firmware_out;
+ }
+
+ /* sync & channel other info */
+ ret = bcm_vk_sync_card_info(vk);
+ if (ret) {
+ dev_err(dev, "Syncing Card Info failure\n");
+ goto err_firmware_out;
+ }
+ }
+ }
+
+err_firmware_out:
+ release_firmware(fw);
+
+err_buf_out:
+ if (bufp)
+ dma_free_coherent(dev, max_buf, bufp, boot_dma_addr);
+
+ return ret;
+}
+
+static u32 bcm_vk_next_boot_image(struct bcm_vk *vk)
+{
+ u32 boot_status;
+ u32 fw_status;
+ u32 load_type = 0; /* default for unknown */
+
+ boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
+ fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS);
+
+ if (!BCM_VK_INTF_IS_DOWN(boot_status) && (boot_status & SRAM_OPEN))
+ load_type = VK_IMAGE_TYPE_BOOT1;
+ else if (boot_status == BOOT1_RUNNING)
+ load_type = VK_IMAGE_TYPE_BOOT2;
+
+ /* Log status so that we know different stages */
+ dev_info(&vk->pdev->dev,
+ "boot-status value for next image: 0x%x : fw-status 0x%x\n",
+ boot_status, fw_status);
+
+ return load_type;
+}
+
+static enum soc_idx get_soc_idx(struct bcm_vk *vk)
+{
+ struct pci_dev *pdev = vk->pdev;
+ enum soc_idx idx = VK_IDX_INVALID;
+ u32 rev;
+ static enum soc_idx const vk_soc_tab[] = { VALKYRIE_A0, VALKYRIE_B0 };
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_VALKYRIE:
+ /* get the chip id to decide sub-class */
+ rev = MAJOR_SOC_REV(vkread32(vk, BAR_0, BAR_CHIP_ID));
+ if (rev < ARRAY_SIZE(vk_soc_tab)) {
+ idx = vk_soc_tab[rev];
+ } else {
+ /* Default to A0 firmware for all other chip revs */
+ idx = VALKYRIE_A0;
+ dev_warn(&pdev->dev,
+ "Rev %d not in image lookup table, default to idx=%d\n",
+ rev, idx);
+ }
+ break;
+
+ case PCI_DEVICE_ID_VIPER:
+ idx = VIPER;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "no images for 0x%x\n", pdev->device);
+ }
+ return idx;
+}
+
+static const char *get_load_fw_name(struct bcm_vk *vk,
+ const struct load_image_entry *entry)
+{
+ const struct firmware *fw;
+ struct device *dev = &vk->pdev->dev;
+ int ret;
+ unsigned long dummy;
+ int i;
+
+ for (i = 0; i < IMG_PER_TYPE_MAX; i++) {
+ fw = NULL;
+ ret = request_partial_firmware_into_buf(&fw,
+ entry->image_name[i],
+ dev, &dummy,
+ sizeof(dummy),
+ 0);
+ release_firmware(fw);
+ if (!ret)
+ return entry->image_name[i];
+ }
+ return NULL;
+}
+
+int bcm_vk_auto_load_all_images(struct bcm_vk *vk)
+{
+ int i, ret = -1;
+ enum soc_idx idx;
+ struct device *dev = &vk->pdev->dev;
+ u32 curr_type;
+ const char *curr_name;
+
+ idx = get_soc_idx(vk);
+ if (idx == VK_IDX_INVALID)
+ goto auto_load_all_exit;
+
+ /* log a message to know the relative loading order */
+ dev_dbg(dev, "Load All for device %d\n", vk->devid);
+
+ for (i = 0; i < NUM_BOOT_STAGES; i++) {
+ curr_type = image_tab[idx][i].image_type;
+ if (bcm_vk_next_boot_image(vk) == curr_type) {
+ curr_name = get_load_fw_name(vk, &image_tab[idx][i]);
+ if (!curr_name) {
+ dev_err(dev, "No suitable firmware exists for type %d",
+ curr_type);
+ ret = -ENOENT;
+ goto auto_load_all_exit;
+ }
+ ret = bcm_vk_load_image_by_type(vk, curr_type,
+ curr_name);
+ dev_info(dev, "Auto load %s, ret %d\n",
+ curr_name, ret);
+
+ if (ret) {
+ dev_err(dev, "Error loading default %s\n",
+ curr_name);
+ goto auto_load_all_exit;
+ }
+ }
+ }
+
+auto_load_all_exit:
+ return ret;
+}
+
+static int bcm_vk_trigger_autoload(struct bcm_vk *vk)
+{
+ if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0)
+ return -EPERM;
+
+ set_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
+ queue_work(vk->wq_thread, &vk->wq_work);
+
+ return 0;
+}
+
+/*
+ * deferred work queue for draining and auto download.
+ */
+static void bcm_vk_wq_handler(struct work_struct *work)
+{
+ struct bcm_vk *vk = container_of(work, struct bcm_vk, wq_work);
+ struct device *dev = &vk->pdev->dev;
+ s32 ret;
+
+ /* check wq offload bit map to perform various operations */
+ if (test_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload)) {
+		/* clear the bit right away for notification */
+ clear_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload);
+ bcm_vk_handle_notf(vk);
+ }
+ if (test_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload)) {
+ bcm_vk_auto_load_all_images(vk);
+
+		/*
+		 * at the end of the operation, clear the AUTO bit and the
+		 * pending bit
+		 */
+ clear_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
+ clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
+ }
+
+ /* next, try to drain */
+ ret = bcm_to_h_msg_dequeue(vk);
+
+ if (ret == 0)
+ dev_dbg(dev, "Spurious trigger for workqueue\n");
+ else if (ret < 0)
+ bcm_vk_blk_drv_access(vk);
+}
+
+static long bcm_vk_load_image(struct bcm_vk *vk,
+ const struct vk_image __user *arg)
+{
+ struct device *dev = &vk->pdev->dev;
+ const char *image_name;
+ struct vk_image image;
+ u32 next_loadable;
+ enum soc_idx idx;
+ int image_idx;
+ int ret = -EPERM;
+
+ if (copy_from_user(&image, arg, sizeof(image)))
+ return -EACCES;
+
+ if ((image.type != VK_IMAGE_TYPE_BOOT1) &&
+ (image.type != VK_IMAGE_TYPE_BOOT2)) {
+ dev_err(dev, "invalid image.type %u\n", image.type);
+ return ret;
+ }
+
+ next_loadable = bcm_vk_next_boot_image(vk);
+ if (next_loadable != image.type) {
+ dev_err(dev, "Next expected image %u, Loading %u\n",
+ next_loadable, image.type);
+ return ret;
+ }
+
+	/*
+	 * Check if a download is already pending. For now this can only
+	 * happen when the driver is being loaded, or when someone issues
+	 * another download command from another shell.
+	 */
+ if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) {
+ dev_err(dev, "Download operation already pending.\n");
+ return ret;
+ }
+
+ image_name = image.filename;
+ if (image_name[0] == '\0') {
+		/* Use the default image name if the filename is empty */
+ idx = get_soc_idx(vk);
+ if (idx == VK_IDX_INVALID)
+ goto err_idx;
+
+ /* Image idx starts with boot1 */
+ image_idx = image.type - VK_IMAGE_TYPE_BOOT1;
+ image_name = get_load_fw_name(vk, &image_tab[idx][image_idx]);
+ if (!image_name) {
+ dev_err(dev, "No suitable image found for type %d",
+ image.type);
+ ret = -ENOENT;
+ goto err_idx;
+ }
+ } else {
+		/* Ensure the filename is NUL-terminated */
+ image.filename[sizeof(image.filename) - 1] = '\0';
+ }
+ ret = bcm_vk_load_image_by_type(vk, image.type, image_name);
+ dev_info(dev, "Load %s, ret %d\n", image_name, ret);
+err_idx:
+ clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
+
+ return ret;
+}
+
+static int bcm_vk_reset_successful(struct bcm_vk *vk)
+{
+ struct device *dev = &vk->pdev->dev;
+ u32 fw_status, reset_reason;
+ int ret = -EAGAIN;
+
+	/*
+	 * Reset could be triggered while the card is in one of several states:
+	 *   i)   in bootROM
+	 *   ii)  after boot1
+	 *   iii) boot2 running
+	 *
+	 * i) & ii) - no status bits will be updated. If vkboot1
+	 * runs automatically after reset, it will set the reason
+	 * to unknown.
+	 * iii) - success means the reboot reason matches and deinit is done.
+	 */
+ fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS);
+ /* immediate exit if interface goes down */
+ if (BCM_VK_INTF_IS_DOWN(fw_status)) {
+ dev_err(dev, "PCIe Intf Down!\n");
+ goto reset_exit;
+ }
+
+ reset_reason = (fw_status & VK_FWSTS_RESET_REASON_MASK);
+ if ((reset_reason == VK_FWSTS_RESET_MBOX_DB) ||
+ (reset_reason == VK_FWSTS_RESET_UNKNOWN))
+ ret = 0;
+
+	/*
+	 * if some of the deinit bits are set but the done bit is not,
+	 * this is a failure when triggered while boot2 is running
+	 */
+ if ((fw_status & VK_FWSTS_DEINIT_TRIGGERED) &&
+ !(fw_status & VK_FWSTS_RESET_DONE))
+ ret = -EAGAIN;
+
+reset_exit:
+ dev_dbg(dev, "FW status = 0x%x ret %d\n", fw_status, ret);
+
+ return ret;
+}
+
+static void bcm_to_v_reset_doorbell(struct bcm_vk *vk, u32 db_val)
+{
+ vkwrite32(vk, db_val, BAR_0, VK_BAR0_RESET_DB_BASE);
+}
+
+static int bcm_vk_trigger_reset(struct bcm_vk *vk)
+{
+ u32 i;
+ u32 value, boot_status;
+ bool is_stdalone, is_boot2;
+ static const u32 bar0_reg_clr_list[] = { BAR_OS_UPTIME,
+ BAR_INTF_VER,
+ BAR_CARD_VOLTAGE,
+ BAR_CARD_TEMPERATURE,
+ BAR_CARD_PWR_AND_THRE };
+
+	/* clean up before pressing the doorbell */
+ bcm_vk_drain_msg_on_reset(vk);
+ vkwrite32(vk, 0, BAR_1, VK_BAR1_MSGQ_DEF_RDY);
+ /* make tag '\0' terminated */
+ vkwrite32(vk, 0, BAR_1, VK_BAR1_BOOT1_VER_TAG);
+
+ for (i = 0; i < VK_BAR1_DAUTH_MAX; i++) {
+ vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_STORE_ADDR(i));
+ vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_VALID_ADDR(i));
+ }
+ for (i = 0; i < VK_BAR1_SOTP_REVID_MAX; i++)
+ vkwrite32(vk, 0, BAR_1, VK_BAR1_SOTP_REVID_ADDR(i));
+
+ memset(&vk->card_info, 0, sizeof(vk->card_info));
+ memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info));
+ memset(&vk->proc_mon_info, 0, sizeof(vk->proc_mon_info));
+ memset(&vk->alert_cnts, 0, sizeof(vk->alert_cnts));
+
+	/*
+	 * When a boot request fails, CODE_PUSH_OFFSET stays persistent,
+	 * which allows us to debug the failure. When we call reset,
+	 * we should clear CODE_PUSH_OFFSET so the ROM does not execute
+	 * the boot again (and fail again) and instead waits for a new
+	 * codepush. Also, if the previous boot encountered an error, the
+	 * entry values need to be cleared.
+	 */
+ boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
+ if (boot_status & BOOT_ERR_MASK) {
+ dev_info(&vk->pdev->dev,
+ "Card in boot error 0x%x, clear CODEPUSH val\n",
+ boot_status);
+ value = 0;
+ } else {
+ value = vkread32(vk, BAR_0, BAR_CODEPUSH_SBL);
+ value &= CODEPUSH_MASK;
+ }
+ vkwrite32(vk, value, BAR_0, BAR_CODEPUSH_SBL);
+
+ /* special reset handling */
+ is_stdalone = boot_status & BOOT_STDALONE_RUNNING;
+ is_boot2 = (boot_status & BOOT_STATE_MASK) == BOOT2_RUNNING;
+ if (vk->peer_alert.flags & ERR_LOG_RAMDUMP) {
+		/*
+		 * if the card is in ramdump mode, it has hit an error. Don't
+		 * reset the reboot reason as it will contain valid info that
+		 * is important - simply use the special reset
+		 */
+ vkwrite32(vk, VK_BAR0_RESET_RAMPDUMP, BAR_0, VK_BAR_FWSTS);
+ return VK_BAR0_RESET_RAMPDUMP;
+ } else if (is_stdalone && !is_boot2) {
+ dev_info(&vk->pdev->dev, "Hard reset on Standalone mode");
+ bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD);
+ return VK_BAR0_RESET_DB_HARD;
+ }
+
+ /* reset fw_status with proper reason, and press db */
+ vkwrite32(vk, VK_FWSTS_RESET_MBOX_DB, BAR_0, VK_BAR_FWSTS);
+ bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_SOFT);
+
+ /* clear other necessary registers and alert records */
+ for (i = 0; i < ARRAY_SIZE(bar0_reg_clr_list); i++)
+ vkwrite32(vk, 0, BAR_0, bar0_reg_clr_list[i]);
+ memset(&vk->host_alert, 0, sizeof(vk->host_alert));
+ memset(&vk->peer_alert, 0, sizeof(vk->peer_alert));
+ /* clear 4096 bits of bitmap */
+ bitmap_clear(vk->bmap, 0, VK_MSG_ID_BITMAP_SIZE);
+
+ return 0;
+}
+
+static long bcm_vk_reset(struct bcm_vk *vk, struct vk_reset __user *arg)
+{
+ struct device *dev = &vk->pdev->dev;
+ struct vk_reset reset;
+ int ret = 0;
+ u32 ramdump_reset;
+ int special_reset;
+
+ if (copy_from_user(&reset, arg, sizeof(struct vk_reset)))
+ return -EFAULT;
+
+ /* check if any download is in-progress, if so return error */
+ if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) {
+ dev_err(dev, "Download operation pending - skip reset.\n");
+ return -EPERM;
+ }
+
+ ramdump_reset = vk->peer_alert.flags & ERR_LOG_RAMDUMP;
+ dev_info(dev, "Issue Reset %s\n",
+ ramdump_reset ? "in ramdump mode" : "");
+
+	/*
+	 * The reset sequence is as follows:
+	 * - send a card-level graceful shutdown
+	 * - wait long enough for the VK to finish its housekeeping,
+	 *   stopping DMA etc.
+	 * - kill host apps
+	 * - trigger the interrupt with the doorbell
+	 */
+ bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_GRACEFUL, 0, 0);
+
+ spin_lock(&vk->ctx_lock);
+ if (!vk->reset_pid) {
+ vk->reset_pid = task_pid_nr(current);
+ } else {
+ dev_err(dev, "Reset already launched by process pid %d\n",
+ vk->reset_pid);
+ ret = -EACCES;
+ }
+ spin_unlock(&vk->ctx_lock);
+ if (ret)
+ goto err_exit;
+
+ bcm_vk_blk_drv_access(vk);
+ special_reset = bcm_vk_trigger_reset(vk);
+
+ /*
+ * Wait enough time for card os to deinit
+ * and populate the reset reason.
+ */
+ msleep(BCM_VK_DEINIT_TIME_MS);
+
+ if (special_reset) {
+ /* if it is special ramdump reset, return the type to user */
+ reset.arg2 = special_reset;
+ if (copy_to_user(arg, &reset, sizeof(reset)))
+ ret = -EFAULT;
+ } else {
+ ret = bcm_vk_reset_successful(vk);
+ }
+
+err_exit:
+ clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
+ return ret;
+}
+
+static int bcm_vk_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct bcm_vk_ctx *ctx = file->private_data;
+ struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
+ unsigned long pg_size;
+
+	/* only BAR2 can be mmap'd; it is resource 4 since the BARs are 64-bit */
+#define VK_MMAPABLE_BAR 4
+
+ pg_size = ((pci_resource_len(vk->pdev, VK_MMAPABLE_BAR) - 1)
+ >> PAGE_SHIFT) + 1;
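+	/*
+	 * For illustration with an assumed BAR length: pg_size rounds the
+	 * resource length up to whole pages, so a 0x1200-byte BAR with 4 KiB
+	 * pages gives ((0x1200 - 1) >> 12) + 1 = 2 pages, and any request
+	 * where vm_pgoff + vma_pages(vma) exceeds 2 is rejected below.
+	 */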
+ if (vma->vm_pgoff + vma_pages(vma) > pg_size)
+ return -EINVAL;
+
+ vma->vm_pgoff += (pci_resource_start(vk->pdev, VK_MMAPABLE_BAR)
+ >> PAGE_SHIFT);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+static long bcm_vk_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret = -EINVAL;
+ struct bcm_vk_ctx *ctx = file->private_data;
+ struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
+ void __user *argp = (void __user *)arg;
+
+ dev_dbg(&vk->pdev->dev,
+ "ioctl, cmd=0x%02x, arg=0x%02lx\n",
+ cmd, arg);
+
+ mutex_lock(&vk->mutex);
+
+ switch (cmd) {
+ case VK_IOCTL_LOAD_IMAGE:
+ ret = bcm_vk_load_image(vk, argp);
+ break;
+
+ case VK_IOCTL_RESET:
+ ret = bcm_vk_reset(vk, argp);
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&vk->mutex);
+
+ return ret;
+}
+
+static const struct file_operations bcm_vk_fops = {
+ .owner = THIS_MODULE,
+ .open = bcm_vk_open,
+ .read = bcm_vk_read,
+ .write = bcm_vk_write,
+ .poll = bcm_vk_poll,
+ .release = bcm_vk_release,
+ .mmap = bcm_vk_mmap,
+ .unlocked_ioctl = bcm_vk_ioctl,
+};
+
+static int bcm_vk_on_panic(struct notifier_block *nb,
+ unsigned long e, void *p)
+{
+ struct bcm_vk *vk = container_of(nb, struct bcm_vk, panic_nb);
+
+ bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD);
+
+ return 0;
+}
+
+static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err;
+ int i;
+ int id;
+ int irq;
+ char name[20];
+ struct bcm_vk *vk;
+ struct device *dev = &pdev->dev;
+ struct miscdevice *misc_device;
+ u32 boot_status;
+
+ /* allocate vk structure which is tied to kref for freeing */
+ vk = kzalloc(sizeof(*vk), GFP_KERNEL);
+ if (!vk)
+ return -ENOMEM;
+
+ kref_init(&vk->kref);
+ if (nr_ib_sgl_blk > BCM_VK_IB_SGL_BLK_MAX) {
+ dev_warn(dev, "Inband SGL blk %d limited to max %d\n",
+ nr_ib_sgl_blk, BCM_VK_IB_SGL_BLK_MAX);
+ nr_ib_sgl_blk = BCM_VK_IB_SGL_BLK_MAX;
+ }
+ vk->ib_sgl_size = nr_ib_sgl_blk * VK_MSGQ_BLK_SIZE;
+ mutex_init(&vk->mutex);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Cannot enable PCI device\n");
+ goto err_free_exit;
+ }
+ vk->pdev = pci_dev_get(pdev);
+
+ err = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (err) {
+ dev_err(dev, "Cannot obtain PCI resources\n");
+ goto err_disable_pdev;
+ }
+
+ /* make sure DMA is good */
+ err = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(BCM_VK_DMA_BITS));
+ if (err) {
+ dev_err(dev, "failed to set DMA mask\n");
+ goto err_disable_pdev;
+ }
+
+	/* The tdma is a scratch area for DMA testing. */
+ if (nr_scratch_pages) {
+ vk->tdma_vaddr = dma_alloc_coherent
+ (dev,
+ nr_scratch_pages * PAGE_SIZE,
+ &vk->tdma_addr, GFP_KERNEL);
+ if (!vk->tdma_vaddr) {
+ err = -ENOMEM;
+ goto err_disable_pdev;
+ }
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, vk);
+
+ irq = pci_alloc_irq_vectors(pdev,
+ 1,
+ VK_MSIX_IRQ_MAX,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+
+ if (irq < VK_MSIX_IRQ_MIN_REQ) {
+ dev_err(dev, "failed to get min %d MSIX interrupts, irq(%d)\n",
+ VK_MSIX_IRQ_MIN_REQ, irq);
+ err = (irq >= 0) ? -EINVAL : irq;
+ goto err_disable_pdev;
+ }
+
+ if (irq != VK_MSIX_IRQ_MAX)
+ dev_warn(dev, "Number of IRQs %d allocated - requested(%d).\n",
+ irq, VK_MSIX_IRQ_MAX);
+
+ for (i = 0; i < MAX_BAR; i++) {
+		/* multiply by 2 for 64-bit BAR mapping */
+ vk->bar[i] = pci_ioremap_bar(pdev, i * 2);
+ if (!vk->bar[i]) {
+ dev_err(dev, "failed to remap BAR%d\n", i);
+ err = -ENOMEM;
+ goto err_iounmap;
+ }
+ }
+
+ for (vk->num_irqs = 0;
+ vk->num_irqs < VK_MSIX_MSGQ_MAX;
+ vk->num_irqs++) {
+ err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
+ bcm_vk_msgq_irqhandler,
+ IRQF_SHARED, DRV_MODULE_NAME, vk);
+ if (err) {
+ dev_err(dev, "failed to request msgq IRQ %d for MSIX %d\n",
+ pdev->irq + vk->num_irqs, vk->num_irqs + 1);
+ goto err_irq;
+ }
+ }
+ /* one irq for notification from VK */
+ err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
+ bcm_vk_notf_irqhandler,
+ IRQF_SHARED, DRV_MODULE_NAME, vk);
+ if (err) {
+ dev_err(dev, "failed to request notf IRQ %d for MSIX %d\n",
+ pdev->irq + vk->num_irqs, vk->num_irqs + 1);
+ goto err_irq;
+ }
+ vk->num_irqs++;
+
+ for (i = 0;
+ (i < VK_MSIX_TTY_MAX) && (vk->num_irqs < irq);
+ i++, vk->num_irqs++) {
+ err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
+ bcm_vk_tty_irqhandler,
+ IRQF_SHARED, DRV_MODULE_NAME, vk);
+ if (err) {
+ dev_err(dev, "failed request tty IRQ %d for MSIX %d\n",
+ pdev->irq + vk->num_irqs, vk->num_irqs + 1);
+ goto err_irq;
+ }
+ bcm_vk_tty_set_irq_enabled(vk, i);
+ }
+
+ id = ida_simple_get(&bcm_vk_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ err = id;
+ dev_err(dev, "unable to get id\n");
+ goto err_irq;
+ }
+
+ vk->devid = id;
+ snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
+ misc_device = &vk->miscdev;
+ misc_device->minor = MISC_DYNAMIC_MINOR;
+ misc_device->name = kstrdup(name, GFP_KERNEL);
+ if (!misc_device->name) {
+ err = -ENOMEM;
+ goto err_ida_remove;
+ }
+	misc_device->fops = &bcm_vk_fops;
+
+ err = misc_register(misc_device);
+ if (err) {
+ dev_err(dev, "failed to register device\n");
+ goto err_kfree_name;
+ }
+
+ INIT_WORK(&vk->wq_work, bcm_vk_wq_handler);
+
+ /* create dedicated workqueue */
+ vk->wq_thread = create_singlethread_workqueue(name);
+ if (!vk->wq_thread) {
+ dev_err(dev, "Fail to create workqueue thread\n");
+ err = -ENOMEM;
+ goto err_misc_deregister;
+ }
+
+ err = bcm_vk_msg_init(vk);
+ if (err) {
+ dev_err(dev, "failed to init msg queue info\n");
+ goto err_destroy_workqueue;
+ }
+
+ /* sync other info */
+ bcm_vk_sync_card_info(vk);
+
+ /* register for panic notifier */
+ vk->panic_nb.notifier_call = bcm_vk_on_panic;
+ err = atomic_notifier_chain_register(&panic_notifier_list,
+ &vk->panic_nb);
+ if (err) {
+ dev_err(dev, "Fail to register panic notifier\n");
+ goto err_destroy_workqueue;
+ }
+
+ snprintf(name, sizeof(name), KBUILD_MODNAME ".%d_ttyVK", id);
+ err = bcm_vk_tty_init(vk, name);
+ if (err)
+ goto err_unregister_panic_notifier;
+
+	/*
+	 * Let's trigger an auto download. We don't want to do it inline here,
+	 * because probe is not supposed to block for a long time.
+	 */
+ boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
+ if (auto_load) {
+ if ((boot_status & BOOT_STATE_MASK) == BROM_RUNNING) {
+ err = bcm_vk_trigger_autoload(vk);
+ if (err)
+ goto err_bcm_vk_tty_exit;
+ } else {
+ dev_err(dev,
+ "Auto-load skipped - BROM not in proper state (0x%x)\n",
+ boot_status);
+ }
+ }
+
+ /* enable hb */
+ bcm_vk_hb_init(vk);
+
+ dev_dbg(dev, "BCM-VK:%u created\n", id);
+
+ return 0;
+
+err_bcm_vk_tty_exit:
+ bcm_vk_tty_exit(vk);
+
+err_unregister_panic_notifier:
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &vk->panic_nb);
+
+err_destroy_workqueue:
+ destroy_workqueue(vk->wq_thread);
+
+err_misc_deregister:
+ misc_deregister(misc_device);
+
+err_kfree_name:
+ kfree(misc_device->name);
+ misc_device->name = NULL;
+
+err_ida_remove:
+ ida_simple_remove(&bcm_vk_ida, id);
+
+err_irq:
+ for (i = 0; i < vk->num_irqs; i++)
+ devm_free_irq(dev, pci_irq_vector(pdev, i), vk);
+
+ pci_disable_msix(pdev);
+ pci_disable_msi(pdev);
+
+err_iounmap:
+ for (i = 0; i < MAX_BAR; i++) {
+ if (vk->bar[i])
+ pci_iounmap(pdev, vk->bar[i]);
+ }
+ pci_release_regions(pdev);
+
+err_disable_pdev:
+ if (vk->tdma_vaddr)
+ dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE,
+ vk->tdma_vaddr, vk->tdma_addr);
+
+ pci_free_irq_vectors(pdev);
+ pci_disable_device(pdev);
+ pci_dev_put(pdev);
+
+err_free_exit:
+ kfree(vk);
+
+ return err;
+}
+
+void bcm_vk_release_data(struct kref *kref)
+{
+ struct bcm_vk *vk = container_of(kref, struct bcm_vk, kref);
+ struct pci_dev *pdev = vk->pdev;
+
+ dev_dbg(&pdev->dev, "BCM-VK:%d release data 0x%p\n", vk->devid, vk);
+ pci_dev_put(pdev);
+ kfree(vk);
+}
+
+static void bcm_vk_remove(struct pci_dev *pdev)
+{
+ int i;
+ struct bcm_vk *vk = pci_get_drvdata(pdev);
+ struct miscdevice *misc_device = &vk->miscdev;
+
+ bcm_vk_hb_deinit(vk);
+
+	/*
+	 * Trigger a reset of the card and wait long enough for the UCODE to
+	 * rerun, which re-initializes the card into its default state.
+	 * This ensures that when the driver is re-enumerated it will start
+	 * from a completely clean state.
+	 */
+ bcm_vk_trigger_reset(vk);
+ usleep_range(BCM_VK_UCODE_BOOT_US, BCM_VK_UCODE_BOOT_MAX_US);
+
+ /* unregister panic notifier */
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &vk->panic_nb);
+
+ bcm_vk_msg_remove(vk);
+ bcm_vk_tty_exit(vk);
+
+ if (vk->tdma_vaddr)
+ dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE,
+ vk->tdma_vaddr, vk->tdma_addr);
+
+	/* deregister if name is set, i.e. the misc dev was registered */
+ if (misc_device->name) {
+ misc_deregister(misc_device);
+ kfree(misc_device->name);
+ ida_simple_remove(&bcm_vk_ida, vk->devid);
+ }
+ for (i = 0; i < vk->num_irqs; i++)
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), vk);
+
+ pci_disable_msix(pdev);
+ pci_disable_msi(pdev);
+
+ cancel_work_sync(&vk->wq_work);
+ destroy_workqueue(vk->wq_thread);
+ bcm_vk_tty_wq_exit(vk);
+
+ for (i = 0; i < MAX_BAR; i++) {
+ if (vk->bar[i])
+ pci_iounmap(pdev, vk->bar[i]);
+ }
+
+ dev_dbg(&pdev->dev, "BCM-VK:%d released\n", vk->devid);
+
+ pci_release_regions(pdev);
+ pci_free_irq_vectors(pdev);
+ pci_disable_device(pdev);
+
+ kref_put(&vk->kref, bcm_vk_release_data);
+}
+
+static void bcm_vk_shutdown(struct pci_dev *pdev)
+{
+ struct bcm_vk *vk = pci_get_drvdata(pdev);
+ u32 reg, boot_stat;
+
+ reg = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
+ boot_stat = reg & BOOT_STATE_MASK;
+
+ if (boot_stat == BOOT1_RUNNING) {
+ /* simply trigger a reset interrupt to park it */
+ bcm_vk_trigger_reset(vk);
+ } else if (boot_stat == BROM_NOT_RUN) {
+ int err;
+ u16 lnksta;
+
+		/*
+		 * The boot status only reflects the boot condition since the
+		 * last reset. As the ucode runs only once to configure PCIe,
+		 * if multiple resets happen, we lose track of whether the
+		 * ucode has run or not. Here, read the current link speed and
+		 * use that to sync up the boot status properly, so that on
+		 * reboot-back-up it has the proper state to start autoload.
+		 */
+ err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
+ if (!err &&
+ (lnksta & PCI_EXP_LNKSTA_CLS) != PCI_EXP_LNKSTA_CLS_2_5GB) {
+ reg |= BROM_STATUS_COMPLETE;
+ vkwrite32(vk, reg, BAR_0, BAR_BOOT_STATUS);
+ }
+ }
+}
+
+static const struct pci_device_id bcm_vk_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_VALKYRIE), },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_VIPER), },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, bcm_vk_ids);
+
+static struct pci_driver pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = bcm_vk_ids,
+ .probe = bcm_vk_probe,
+ .remove = bcm_vk_remove,
+ .shutdown = bcm_vk_shutdown,
+};
+module_pci_driver(pci_driver);
+
+MODULE_DESCRIPTION("Broadcom VK Host Driver");
+MODULE_AUTHOR("Scott Branden <scott.branden@broadcom.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.c b/drivers/misc/bcm-vk/bcm_vk_msg.c
new file mode 100644
index 000000000000..f40cf08a6192
--- /dev/null
+++ b/drivers/misc/bcm-vk/bcm_vk_msg.c
@@ -0,0 +1,1357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/hash.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/sizes.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+
+#include "bcm_vk.h"
+#include "bcm_vk_msg.h"
+#include "bcm_vk_sg.h"
+
+/* functions to manipulate the transport id in msg block */
+#define BCM_VK_MSG_Q_SHIFT 4
+#define BCM_VK_MSG_Q_MASK 0xF
+#define BCM_VK_MSG_ID_MASK 0xFFF
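+/*
+ * For illustration (a made-up value, not one from the interface spec):
+ * the queue number lives in bits [3:0] of trans_id and the message id
+ * in bits [15:4], so a trans_id of 0x0235 decodes to msg_id 0x23 on
+ * queue 5. The 12-bit id field is also why the driver keeps a
+ * 4096-entry msg-id bitmap.
+ */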
+
+#define BCM_VK_DMA_DRAIN_MAX_MS 2000
+
+/* this number times q_size gives the max number of msgs processed per loop */
+#define BCM_VK_MSG_PROC_MAX_LOOP 2
+
+/* module parameter */
+static bool hb_mon = true;
+module_param(hb_mon, bool, 0444);
+MODULE_PARM_DESC(hb_mon, "Monitoring heartbeat continuously.\n");
+static int batch_log = 1;
+module_param(batch_log, int, 0444);
+MODULE_PARM_DESC(batch_log, "Max num of logs per batch operation.\n");
+
+static bool hb_mon_is_on(void)
+{
+ return hb_mon;
+}
+
+static u32 get_q_num(const struct vk_msg_blk *msg)
+{
+ u32 q_num = msg->trans_id & BCM_VK_MSG_Q_MASK;
+
+ if (q_num >= VK_MSGQ_PER_CHAN_MAX)
+ q_num = VK_MSGQ_NUM_DEFAULT;
+ return q_num;
+}
+
+static void set_q_num(struct vk_msg_blk *msg, u32 q_num)
+{
+ u32 trans_q;
+
+ if (q_num >= VK_MSGQ_PER_CHAN_MAX)
+ trans_q = VK_MSGQ_NUM_DEFAULT;
+ else
+ trans_q = q_num;
+
+ msg->trans_id = (msg->trans_id & ~BCM_VK_MSG_Q_MASK) | trans_q;
+}
+
+static u32 get_msg_id(const struct vk_msg_blk *msg)
+{
+ return ((msg->trans_id >> BCM_VK_MSG_Q_SHIFT) & BCM_VK_MSG_ID_MASK);
+}
+
+static void set_msg_id(struct vk_msg_blk *msg, u32 val)
+{
+ msg->trans_id = (val << BCM_VK_MSG_Q_SHIFT) | get_q_num(msg);
+}
+
+static u32 msgq_inc(const struct bcm_vk_sync_qinfo *qinfo, u32 idx, u32 inc)
+{
+ return ((idx + inc) & qinfo->q_mask);
+}
+
+static
+struct vk_msg_blk __iomem *msgq_blk_addr(const struct bcm_vk_sync_qinfo *qinfo,
+ u32 idx)
+{
+ return qinfo->q_start + (VK_MSGQ_BLK_SIZE * idx);
+}
+
+static u32 msgq_occupied(const struct bcm_vk_msgq __iomem *msgq,
+ const struct bcm_vk_sync_qinfo *qinfo)
+{
+ u32 wr_idx, rd_idx;
+
+ wr_idx = readl_relaxed(&msgq->wr_idx);
+ rd_idx = readl_relaxed(&msgq->rd_idx);
+
+ return ((wr_idx - rd_idx) & qinfo->q_mask);
+}
+
+static
+u32 msgq_avail_space(const struct bcm_vk_msgq __iomem *msgq,
+ const struct bcm_vk_sync_qinfo *qinfo)
+{
+ return (qinfo->q_size - msgq_occupied(msgq, qinfo) - 1);
+}
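+
+/*
+ * A small worked example of the ring arithmetic above, assuming a
+ * power-of-two q_size of 16 (q_mask 15): with wr_idx 3 and rd_idx 14,
+ * occupied = (3 - 14) & 15 = 5 blocks and avail = 16 - 5 - 1 = 10
+ * blocks. One slot is always kept empty so a full queue is never
+ * mistaken for an empty one.
+ */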
+
+/* number of retries when enqueue message fails before returning EAGAIN */
+#define BCM_VK_H2VK_ENQ_RETRY 10
+#define BCM_VK_H2VK_ENQ_RETRY_DELAY_MS 50
+
+bool bcm_vk_drv_access_ok(struct bcm_vk *vk)
+{
+ return (!!atomic_read(&vk->msgq_inited));
+}
+
+void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask)
+{
+ struct bcm_vk_alert *alert = &vk->host_alert;
+ unsigned long flags;
+
+	/* use the irqsave version as this may be called inside a timer interrupt */
+ spin_lock_irqsave(&vk->host_alert_lock, flags);
+ alert->notfs |= bit_mask;
+ spin_unlock_irqrestore(&vk->host_alert_lock, flags);
+
+ if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
+ queue_work(vk->wq_thread, &vk->wq_work);
+}
+
+/*
+ * Heartbeat related defines
+ * The heartbeat from the host is a last resort. If a stuck condition
+ * happens on the card, the firmware is supposed to detect it. Therefore,
+ * the heartbeat values used by the driver are more relaxed, and need to be
+ * bigger than the watchdog timeout on the card. The watchdog timeout on
+ * the card is 20s, with a jitter of 2s => 22s. We use a value of 27s here.
+ */
+#define BCM_VK_HB_TIMER_S 3
+#define BCM_VK_HB_TIMER_VALUE (BCM_VK_HB_TIMER_S * HZ)
+#define BCM_VK_HB_LOST_MAX (27 / BCM_VK_HB_TIMER_S)
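+
+/*
+ * Spelled out, the arithmetic above (derived from the constants, not new
+ * logic): the poll timer fires every BCM_VK_HB_TIMER_S = 3s and
+ * BCM_VK_HB_LOST_MAX = 27 / 3 = 9, so an alert is raised once the
+ * uptime counter has been stale for more than 9 consecutive polls,
+ * i.e. roughly 27-30s - comfortably above the card's 22s watchdog
+ * window.
+ */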
+
+static void bcm_vk_hb_poll(struct timer_list *t)
+{
+ u32 uptime_s;
+ struct bcm_vk_hb_ctrl *hb = container_of(t, struct bcm_vk_hb_ctrl,
+ timer);
+ struct bcm_vk *vk = container_of(hb, struct bcm_vk, hb_ctrl);
+
+ if (bcm_vk_drv_access_ok(vk) && hb_mon_is_on()) {
+ /* read uptime from register and compare */
+ uptime_s = vkread32(vk, BAR_0, BAR_OS_UPTIME);
+
+ if (uptime_s == hb->last_uptime)
+ hb->lost_cnt++;
+ else /* reset to avoid accumulation */
+ hb->lost_cnt = 0;
+
+ dev_dbg(&vk->pdev->dev, "Last uptime %d current %d, lost %d\n",
+ hb->last_uptime, uptime_s, hb->lost_cnt);
+
+		/*
+		 * if the interface goes down without any activity, a value
+		 * of 0xFFFFFFFF will be read continuously, and detection
+		 * will eventually trigger.
+		 */
+ hb->last_uptime = uptime_s;
+ } else {
+ /* reset heart beat lost cnt */
+ hb->lost_cnt = 0;
+ }
+
+ /* next, check if heartbeat exceeds limit */
+ if (hb->lost_cnt > BCM_VK_HB_LOST_MAX) {
+ dev_err(&vk->pdev->dev, "Heartbeat Misses %d times, %d s!\n",
+ BCM_VK_HB_LOST_MAX,
+ BCM_VK_HB_LOST_MAX * BCM_VK_HB_TIMER_S);
+
+ bcm_vk_blk_drv_access(vk);
+ bcm_vk_set_host_alert(vk, ERR_LOG_HOST_HB_FAIL);
+ }
+ /* re-arm timer */
+ mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE);
+}
+
+void bcm_vk_hb_init(struct bcm_vk *vk)
+{
+ struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;
+
+ timer_setup(&hb->timer, bcm_vk_hb_poll, 0);
+ mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE);
+}
+
+void bcm_vk_hb_deinit(struct bcm_vk *vk)
+{
+ struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;
+
+ del_timer(&hb->timer);
+}
+
+static void bcm_vk_msgid_bitmap_clear(struct bcm_vk *vk,
+ unsigned int start,
+ unsigned int nbits)
+{
+ spin_lock(&vk->msg_id_lock);
+ bitmap_clear(vk->bmap, start, nbits);
+ spin_unlock(&vk->msg_id_lock);
+}
+
+/*
+ * allocate a ctx per file struct
+ */
+static struct bcm_vk_ctx *bcm_vk_get_ctx(struct bcm_vk *vk, const pid_t pid)
+{
+ u32 i;
+ struct bcm_vk_ctx *ctx = NULL;
+ u32 hash_idx = hash_32(pid, VK_PID_HT_SHIFT_BIT);
+
+ spin_lock(&vk->ctx_lock);
+
+ /* check if it is in reset, if so, don't allow */
+ if (vk->reset_pid) {
+ dev_err(&vk->pdev->dev,
+ "No context allowed during reset by pid %d\n",
+ vk->reset_pid);
+
+ goto in_reset_exit;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
+ if (!vk->ctx[i].in_use) {
+ vk->ctx[i].in_use = true;
+ ctx = &vk->ctx[i];
+ break;
+ }
+ }
+
+ if (!ctx) {
+ dev_err(&vk->pdev->dev, "All context in use\n");
+
+ goto all_in_use_exit;
+ }
+
+ /* set the pid and insert it to hash table */
+ ctx->pid = pid;
+ ctx->hash_idx = hash_idx;
+ list_add_tail(&ctx->node, &vk->pid_ht[hash_idx].head);
+
+ /* increase kref */
+ kref_get(&vk->kref);
+
+ /* clear counter */
+ atomic_set(&ctx->pend_cnt, 0);
+ atomic_set(&ctx->dma_cnt, 0);
+ init_waitqueue_head(&ctx->rd_wq);
+
+all_in_use_exit:
+in_reset_exit:
+ spin_unlock(&vk->ctx_lock);
+
+ return ctx;
+}
+
+static u16 bcm_vk_get_msg_id(struct bcm_vk *vk)
+{
+ u16 rc = VK_MSG_ID_OVERFLOW;
+ u16 test_bit_count = 0;
+
+ spin_lock(&vk->msg_id_lock);
+ while (test_bit_count < (VK_MSG_ID_BITMAP_SIZE - 1)) {
+		/*
+		 * The first time we enter this loop msg_id is 0, so the
+		 * first id tested is 1. We skip VK_SIMPLEX_MSG_ID (0),
+		 * which is reserved for one-way host2vk communication.
+		 */
+ vk->msg_id++;
+ if (vk->msg_id == VK_MSG_ID_BITMAP_SIZE)
+ vk->msg_id = 1;
+
+ if (test_bit(vk->msg_id, vk->bmap)) {
+ test_bit_count++;
+ continue;
+ }
+ rc = vk->msg_id;
+ bitmap_set(vk->bmap, vk->msg_id, 1);
+ break;
+ }
+ spin_unlock(&vk->msg_id_lock);
+
+ return rc;
+}
+
+static int bcm_vk_free_ctx(struct bcm_vk *vk, struct bcm_vk_ctx *ctx)
+{
+ u32 idx;
+ u32 hash_idx;
+ pid_t pid;
+ struct bcm_vk_ctx *entry;
+ int count = 0;
+
+ if (!ctx) {
+ dev_err(&vk->pdev->dev, "NULL context detected\n");
+ return -EINVAL;
+ }
+ idx = ctx->idx;
+ pid = ctx->pid;
+
+ spin_lock(&vk->ctx_lock);
+
+ if (!vk->ctx[idx].in_use) {
+ dev_err(&vk->pdev->dev, "context[%d] not in use!\n", idx);
+ } else {
+ vk->ctx[idx].in_use = false;
+ vk->ctx[idx].miscdev = NULL;
+
+ /* Remove it from hash list and see if it is the last one. */
+ list_del(&ctx->node);
+ hash_idx = ctx->hash_idx;
+ list_for_each_entry(entry, &vk->pid_ht[hash_idx].head, node) {
+ if (entry->pid == pid)
+ count++;
+ }
+ }
+
+ spin_unlock(&vk->ctx_lock);
+
+ return count;
+}
+
+static void bcm_vk_free_wkent(struct device *dev, struct bcm_vk_wkent *entry)
+{
+ int proc_cnt;
+
+ bcm_vk_sg_free(dev, entry->dma, VK_DMA_MAX_ADDRS, &proc_cnt);
+ if (proc_cnt)
+ atomic_dec(&entry->ctx->dma_cnt);
+
+ kfree(entry->to_h_msg);
+ kfree(entry);
+}
+
+static void bcm_vk_drain_all_pend(struct device *dev,
+ struct bcm_vk_msg_chan *chan,
+ struct bcm_vk_ctx *ctx)
+{
+ u32 num;
+ struct bcm_vk_wkent *entry, *tmp;
+ struct bcm_vk *vk;
+ struct list_head del_q;
+
+ if (ctx)
+ vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
+
+ INIT_LIST_HEAD(&del_q);
+ spin_lock(&chan->pendq_lock);
+ for (num = 0; num < chan->q_nr; num++) {
+ list_for_each_entry_safe(entry, tmp, &chan->pendq[num], node) {
+ if ((!ctx) || (entry->ctx->idx == ctx->idx)) {
+ list_del(&entry->node);
+ list_add_tail(&entry->node, &del_q);
+ }
+ }
+ }
+ spin_unlock(&chan->pendq_lock);
+
+ /* batch clean up */
+ num = 0;
+ list_for_each_entry_safe(entry, tmp, &del_q, node) {
+ list_del(&entry->node);
+ num++;
+ if (ctx) {
+ struct vk_msg_blk *msg;
+ int bit_set;
+ bool responded;
+ u32 msg_id;
+
+ /* if it is specific ctx, log for any stuck */
+ msg = entry->to_v_msg;
+ msg_id = get_msg_id(msg);
+ bit_set = test_bit(msg_id, vk->bmap);
+ responded = entry->to_h_msg ? true : false;
+ if (num <= batch_log)
+ dev_info(dev,
+ "Drained: fid %u size %u msg 0x%x(seq-%x) ctx 0x%x[fd-%d] args:[0x%x 0x%x] resp %s, bmap %d\n",
+ msg->function_id, msg->size,
+ msg_id, entry->seq_num,
+ msg->context_id, entry->ctx->idx,
+ msg->cmd, msg->arg,
+ responded ? "T" : "F", bit_set);
+ if (responded)
+ atomic_dec(&ctx->pend_cnt);
+ else if (bit_set)
+ bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
+ }
+ bcm_vk_free_wkent(dev, entry);
+ }
+ if (num && ctx)
+ dev_info(dev, "Total drained items %d [fd-%d]\n",
+ num, ctx->idx);
+}
+
+void bcm_vk_drain_msg_on_reset(struct bcm_vk *vk)
+{
+ bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
+ bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);
+}
+
+/*
+ * Function to sync up the message queue info that is provided by BAR1
+ */
+int bcm_vk_sync_msgq(struct bcm_vk *vk, bool force_sync)
+{
+ struct bcm_vk_msgq __iomem *msgq;
+ struct device *dev = &vk->pdev->dev;
+ u32 msgq_off;
+ u32 num_q;
+ struct bcm_vk_msg_chan *chan_list[] = {&vk->to_v_msg_chan,
+ &vk->to_h_msg_chan};
+ struct bcm_vk_msg_chan *chan;
+ int i, j;
+ int ret = 0;
+
+	/*
+	 * If the driver is loaded at startup, when the VK OS is not up yet,
+	 * the msgq info may not be available until later. In that case we
+	 * skip, and the sync function is expected to be called again.
+	 */
+ if (!bcm_vk_msgq_marker_valid(vk)) {
+ dev_info(dev, "BAR1 msgq marker not initialized.\n");
+ return -EAGAIN;
+ }
+
+ msgq_off = vkread32(vk, BAR_1, VK_BAR1_MSGQ_CTRL_OFF);
+
+ /* each side is always half the total */
+ num_q = vkread32(vk, BAR_1, VK_BAR1_MSGQ_NR) / 2;
+ if (!num_q || (num_q > VK_MSGQ_PER_CHAN_MAX)) {
+ dev_err(dev,
+ "Advertised msgq %d error - max %d allowed\n",
+ num_q, VK_MSGQ_PER_CHAN_MAX);
+ return -EINVAL;
+ }
+
+ vk->to_v_msg_chan.q_nr = num_q;
+ vk->to_h_msg_chan.q_nr = num_q;
+
+ /* first msgq location */
+ msgq = vk->bar[BAR_1] + msgq_off;
+
+ /*
+ * if this function is called when it is already inited,
+ * something is wrong
+ */
+ if (bcm_vk_drv_access_ok(vk) && !force_sync) {
+ dev_err(dev, "Msgq info already in sync\n");
+ return -EPERM;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chan_list); i++) {
+ chan = chan_list[i];
+ memset(chan->sync_qinfo, 0, sizeof(chan->sync_qinfo));
+
+ for (j = 0; j < num_q; j++) {
+ struct bcm_vk_sync_qinfo *qinfo;
+ u32 msgq_start;
+ u32 msgq_size;
+ u32 msgq_nxt;
+ u32 msgq_db_offset, q_db_offset;
+
+ chan->msgq[j] = msgq;
+ msgq_start = readl_relaxed(&msgq->start);
+ msgq_size = readl_relaxed(&msgq->size);
+ msgq_nxt = readl_relaxed(&msgq->nxt);
+ msgq_db_offset = readl_relaxed(&msgq->db_offset);
+ q_db_offset = (msgq_db_offset & ((1 << DB_SHIFT) - 1));
+ if (q_db_offset == (~msgq_db_offset >> DB_SHIFT))
+ msgq_db_offset = q_db_offset;
+ else
+ /* fall back to default */
+ msgq_db_offset = VK_BAR0_Q_DB_BASE(j);
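+			/*
+			 * To illustrate the check above with an assumed
+			 * value: the card publishes the doorbell offset in
+			 * the low 16 bits and its one's complement in the
+			 * high 16 bits, so 0xfb7b0484 validates as offset
+			 * 0x0484 because (~0xfb7b0484 >> 16) == 0x0484;
+			 * anything that fails the check falls back to
+			 * VK_BAR0_Q_DB_BASE(j).
+			 */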
+
+ dev_info(dev,
+ "MsgQ[%d] type %d num %d, @ 0x%x, db_offset 0x%x rd_idx %d wr_idx %d, size %d, nxt 0x%x\n",
+ j,
+ readw_relaxed(&msgq->type),
+ readw_relaxed(&msgq->num),
+ msgq_start,
+ msgq_db_offset,
+ readl_relaxed(&msgq->rd_idx),
+ readl_relaxed(&msgq->wr_idx),
+ msgq_size,
+ msgq_nxt);
+
+ qinfo = &chan->sync_qinfo[j];
+ /* formulate and record static info */
+ qinfo->q_start = vk->bar[BAR_1] + msgq_start;
+ qinfo->q_size = msgq_size;
+			/* set the low-water threshold at 50%, i.e. half the queue */
+ qinfo->q_low = qinfo->q_size >> 1;
+ qinfo->q_mask = qinfo->q_size - 1;
+ qinfo->q_db_offset = msgq_db_offset;
+
+ msgq++;
+ }
+ }
+ atomic_set(&vk->msgq_inited, 1);
+
+ return ret;
+}
+
+static int bcm_vk_msg_chan_init(struct bcm_vk_msg_chan *chan)
+{
+ u32 i;
+
+ mutex_init(&chan->msgq_mutex);
+ spin_lock_init(&chan->pendq_lock);
+ for (i = 0; i < VK_MSGQ_MAX_NR; i++)
+ INIT_LIST_HEAD(&chan->pendq[i]);
+
+ return 0;
+}
+
+static void bcm_vk_append_pendq(struct bcm_vk_msg_chan *chan, u16 q_num,
+ struct bcm_vk_wkent *entry)
+{
+ struct bcm_vk_ctx *ctx;
+
+ spin_lock(&chan->pendq_lock);
+ list_add_tail(&entry->node, &chan->pendq[q_num]);
+ if (entry->to_h_msg) {
+ ctx = entry->ctx;
+ atomic_inc(&ctx->pend_cnt);
+ wake_up_interruptible(&ctx->rd_wq);
+ }
+ spin_unlock(&chan->pendq_lock);
+}
+
+static u32 bcm_vk_append_ib_sgl(struct bcm_vk *vk,
+ struct bcm_vk_wkent *entry,
+ struct _vk_data *data,
+ unsigned int num_planes)
+{
+ unsigned int i;
+ unsigned int item_cnt = 0;
+ struct device *dev = &vk->pdev->dev;
+ struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
+ struct vk_msg_blk *msg = &entry->to_v_msg[0];
+ struct bcm_vk_msgq __iomem *msgq;
+ struct bcm_vk_sync_qinfo *qinfo;
+ u32 ib_sgl_size = 0;
+ u8 *buf = (u8 *)&entry->to_v_msg[entry->to_v_blks];
+ u32 avail;
+ u32 q_num;
+
+	/* skip if queue usage has hit the high watermark (free space low) */
+ q_num = get_q_num(msg);
+ msgq = chan->msgq[q_num];
+ qinfo = &chan->sync_qinfo[q_num];
+ avail = msgq_avail_space(msgq, qinfo);
+ if (avail < qinfo->q_low) {
+ dev_dbg(dev, "Skip inserting inband SGL, [0x%x/0x%x]\n",
+ avail, qinfo->q_size);
+ return 0;
+ }
+
+ for (i = 0; i < num_planes; i++) {
+ if (data[i].address &&
+ (ib_sgl_size + data[i].size) <= vk->ib_sgl_size) {
+ item_cnt++;
+ memcpy(buf, entry->dma[i].sglist, data[i].size);
+ ib_sgl_size += data[i].size;
+ buf += data[i].size;
+ }
+ }
+
+ dev_dbg(dev, "Num %u sgl items appended, size 0x%x, room 0x%x\n",
+ item_cnt, ib_sgl_size, vk->ib_sgl_size);
+
+ /* round up size */
+ ib_sgl_size = (ib_sgl_size + VK_MSGQ_BLK_SIZE - 1)
+ >> VK_MSGQ_BLK_SZ_SHIFT;
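+	/*
+	 * e.g. with assumed sizes: SGLs totalling 40 bytes round up to
+	 * (40 + 16 - 1) >> 4 = 3 blocks appended after the message body.
+	 */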
+
+ return ib_sgl_size;
+}
+
+void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val)
+{
+ struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
+ struct bcm_vk_sync_qinfo *qinfo = &chan->sync_qinfo[q_num];
+
+ vkwrite32(vk, db_val, BAR_0, qinfo->q_db_offset);
+}
+
+static int bcm_to_v_msg_enqueue(struct bcm_vk *vk, struct bcm_vk_wkent *entry)
+{
+ static u32 seq_num;
+ struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
+ struct device *dev = &vk->pdev->dev;
+ struct vk_msg_blk *src = &entry->to_v_msg[0];
+
+ struct vk_msg_blk __iomem *dst;
+ struct bcm_vk_msgq __iomem *msgq;
+ struct bcm_vk_sync_qinfo *qinfo;
+ u32 q_num = get_q_num(src);
+ u32 wr_idx; /* local copy */
+ u32 i;
+ u32 avail;
+ u32 retry;
+
+ if (entry->to_v_blks != src->size + 1) {
+ dev_err(dev, "number of blks %d not matching %d MsgId[0x%x]: func %d ctx 0x%x\n",
+ entry->to_v_blks,
+ src->size + 1,
+ get_msg_id(src),
+ src->function_id,
+ src->context_id);
+ return -EMSGSIZE;
+ }
+
+ msgq = chan->msgq[q_num];
+ qinfo = &chan->sync_qinfo[q_num];
+
+ mutex_lock(&chan->msgq_mutex);
+
+ avail = msgq_avail_space(msgq, qinfo);
+
+	/* if not enough space, return EAGAIN and let the app handle it */
+ retry = 0;
+ while ((avail < entry->to_v_blks) &&
+ (retry++ < BCM_VK_H2VK_ENQ_RETRY)) {
+ mutex_unlock(&chan->msgq_mutex);
+
+ msleep(BCM_VK_H2VK_ENQ_RETRY_DELAY_MS);
+ mutex_lock(&chan->msgq_mutex);
+ avail = msgq_avail_space(msgq, qinfo);
+ }
+ if (retry > BCM_VK_H2VK_ENQ_RETRY) {
+ mutex_unlock(&chan->msgq_mutex);
+ return -EAGAIN;
+ }
+
+ /* at this point, mutex is taken and there is enough space */
+ entry->seq_num = seq_num++; /* update debug seq number */
+ wr_idx = readl_relaxed(&msgq->wr_idx);
+
+ if (wr_idx >= qinfo->q_size) {
+ dev_crit(dev, "Invalid wr_idx 0x%x => max 0x%x!",
+ wr_idx, qinfo->q_size);
+ bcm_vk_blk_drv_access(vk);
+ bcm_vk_set_host_alert(vk, ERR_LOG_HOST_PCIE_DWN);
+ goto idx_err;
+ }
+
+ dst = msgq_blk_addr(qinfo, wr_idx);
+ for (i = 0; i < entry->to_v_blks; i++) {
+ memcpy_toio(dst, src, sizeof(*dst));
+
+ src++;
+ wr_idx = msgq_inc(qinfo, wr_idx, 1);
+ dst = msgq_blk_addr(qinfo, wr_idx);
+ }
+
+ /* flush the write pointer */
+ writel(wr_idx, &msgq->wr_idx);
+
+ /* log new info for debugging */
+ dev_dbg(dev,
+ "MsgQ[%d] [Rd Wr] = [%d %d] blks inserted %d - Q = [u-%d a-%d]/%d\n",
+ readl_relaxed(&msgq->num),
+ readl_relaxed(&msgq->rd_idx),
+ wr_idx,
+ entry->to_v_blks,
+ msgq_occupied(msgq, qinfo),
+ msgq_avail_space(msgq, qinfo),
+ readl_relaxed(&msgq->size));
+	/*
+	 * Press the doorbell for this queue number. 1 is added to wr_idx so
+	 * that the value 0 never appears on the VK side, allowing it to be
+	 * distinguished from the initial value.
+	 */
+ bcm_to_v_q_doorbell(vk, q_num, wr_idx + 1);
+idx_err:
+ mutex_unlock(&chan->msgq_mutex);
+ return 0;
+}
+
+int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type,
+ const pid_t pid, const u32 q_num)
+{
+ int rc = 0;
+ struct bcm_vk_wkent *entry;
+ struct device *dev = &vk->pdev->dev;
+
+	/*
+	 * Check that the marker is still good. The PCIe interface may have
+	 * gone down, and if we send things down based on broken values, the
+	 * kernel may panic.
+	 */
+ if (!bcm_vk_msgq_marker_valid(vk)) {
+ dev_info(dev, "PCIe comm chan - invalid marker (0x%x)!\n",
+ vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY));
+ return -EINVAL;
+ }
+
+ entry = kzalloc(sizeof(*entry) +
+ sizeof(struct vk_msg_blk), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ /* fill up necessary data */
+ entry->to_v_msg[0].function_id = VK_FID_SHUTDOWN;
+ set_q_num(&entry->to_v_msg[0], q_num);
+ set_msg_id(&entry->to_v_msg[0], VK_SIMPLEX_MSG_ID);
+ entry->to_v_blks = 1; /* always 1 block */
+
+ entry->to_v_msg[0].cmd = shut_type;
+ entry->to_v_msg[0].arg = pid;
+
+ rc = bcm_to_v_msg_enqueue(vk, entry);
+ if (rc)
+ dev_err(dev,
+ "Sending shutdown message to q %d for pid %d fails.\n",
+ get_q_num(&entry->to_v_msg[0]), pid);
+
+ kfree(entry);
+
+ return rc;
+}
+
+static int bcm_vk_handle_last_sess(struct bcm_vk *vk, const pid_t pid,
+ const u32 q_num)
+{
+ int rc = 0;
+ struct device *dev = &vk->pdev->dev;
+
+	/*
+	 * Don't send anything down if the message queue is not initialized;
+	 * if this is the reset session, just clear it.
+	 */
+ if (!bcm_vk_drv_access_ok(vk)) {
+ if (vk->reset_pid == pid)
+ vk->reset_pid = 0;
+ return -EPERM;
+ }
+
+ dev_dbg(dev, "No more sessions, shut down pid %d\n", pid);
+
+ /* only need to do it if it is not the reset process */
+ if (vk->reset_pid != pid)
+ rc = bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_PID, pid, q_num);
+ else
+ /* put reset_pid to 0 if it is exiting last session */
+ vk->reset_pid = 0;
+
+ return rc;
+}
+
+static struct bcm_vk_wkent *bcm_vk_dequeue_pending(struct bcm_vk *vk,
+ struct bcm_vk_msg_chan *chan,
+ u16 q_num,
+ u16 msg_id)
+{
+ bool found = false;
+ struct bcm_vk_wkent *entry;
+
+ spin_lock(&chan->pendq_lock);
+ list_for_each_entry(entry, &chan->pendq[q_num], node) {
+ if (get_msg_id(&entry->to_v_msg[0]) == msg_id) {
+ list_del(&entry->node);
+ found = true;
+ bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
+ break;
+ }
+ }
+ spin_unlock(&chan->pendq_lock);
+ return ((found) ? entry : NULL);
+}
+
+s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk)
+{
+ struct device *dev = &vk->pdev->dev;
+ struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
+ struct vk_msg_blk *data;
+ struct vk_msg_blk __iomem *src;
+ struct vk_msg_blk *dst;
+ struct bcm_vk_msgq __iomem *msgq;
+ struct bcm_vk_sync_qinfo *qinfo;
+ struct bcm_vk_wkent *entry;
+ u32 rd_idx, wr_idx;
+ u32 q_num, msg_id, j;
+ u32 num_blks;
+ s32 total = 0;
+ int cnt = 0;
+ int msg_processed = 0;
+ int max_msg_to_process;
+ bool exit_loop;
+
+	/*
+	 * Drain all the messages from the queues; for each one, find its
+	 * pending entry in the to_v queue based on msg_id & q_num, and move
+	 * the entry to the to_h pending queue, where it waits for the
+	 * user-space program to extract it.
+	 */
+ mutex_lock(&chan->msgq_mutex);
+
+ for (q_num = 0; q_num < chan->q_nr; q_num++) {
+ msgq = chan->msgq[q_num];
+ qinfo = &chan->sync_qinfo[q_num];
+ max_msg_to_process = BCM_VK_MSG_PROC_MAX_LOOP * qinfo->q_size;
+
+ rd_idx = readl_relaxed(&msgq->rd_idx);
+ wr_idx = readl_relaxed(&msgq->wr_idx);
+ msg_processed = 0;
+ exit_loop = false;
+ while ((rd_idx != wr_idx) && !exit_loop) {
+ u8 src_size;
+
+ /*
+ * Make a local copy and get pointer to src blk
+ * The rd_idx is masked before getting the pointer to
+ * avoid out of bound access in case the interface goes
+ * down. It will end up pointing to the last block in
+ * the buffer, but subsequent src->size check would be
+ * able to catch this.
+ */
+ src = msgq_blk_addr(qinfo, rd_idx & qinfo->q_mask);
+ src_size = readb(&src->size);
+
+ if ((rd_idx >= qinfo->q_size) ||
+ (src_size > (qinfo->q_size - 1))) {
+ dev_crit(dev,
+ "Invalid rd_idx 0x%x or size 0x%x => max 0x%x!",
+ rd_idx, src_size, qinfo->q_size);
+ bcm_vk_blk_drv_access(vk);
+ bcm_vk_set_host_alert(vk,
+ ERR_LOG_HOST_PCIE_DWN);
+ goto idx_err;
+ }
+
+ num_blks = src_size + 1;
+ data = kzalloc(num_blks * VK_MSGQ_BLK_SIZE, GFP_KERNEL);
+ if (data) {
+ /* copy messages and linearize it */
+ dst = data;
+ for (j = 0; j < num_blks; j++) {
+ memcpy_fromio(dst, src, sizeof(*dst));
+
+ dst++;
+ rd_idx = msgq_inc(qinfo, rd_idx, 1);
+ src = msgq_blk_addr(qinfo, rd_idx);
+ }
+ total++;
+ } else {
+ /*
+ * if we could not allocate memory in kernel,
+ * that is fatal.
+ */
+ dev_crit(dev, "Kernel mem allocation failure.\n");
+ total = -ENOMEM;
+ goto idx_err;
+ }
+
+ /* flush rd pointer after a message is dequeued */
+ writel(rd_idx, &msgq->rd_idx);
+
+ /* log new info for debugging */
+ dev_dbg(dev,
+ "MsgQ[%d] [Rd Wr] = [%d %d] blks extracted %d - Q = [u-%d a-%d]/%d\n",
+ readl_relaxed(&msgq->num),
+ rd_idx,
+ wr_idx,
+ num_blks,
+ msgq_occupied(msgq, qinfo),
+ msgq_avail_space(msgq, qinfo),
+ readl_relaxed(&msgq->size));
+
+ /*
+ * No need to search if it is an autonomous one-way
+ * message from driver, as these messages do not bear
+ * a to_v pending item. Currently, only the shutdown
+ * message falls into this category.
+ */
+ if (data->function_id == VK_FID_SHUTDOWN) {
+ kfree(data);
+ continue;
+ }
+
+ msg_id = get_msg_id(data);
+ /* lookup original message in to_v direction */
+ entry = bcm_vk_dequeue_pending(vk,
+ &vk->to_v_msg_chan,
+ q_num,
+ msg_id);
+
+			/*
+			 * If an entry is found, attach the response and move
+			 * it to the to_h pending queue; a response without a
+			 * matching prior send is logged and dropped below.
+			 */
+ if (entry) {
+ entry->to_h_blks = num_blks;
+ entry->to_h_msg = data;
+ bcm_vk_append_pendq(&vk->to_h_msg_chan,
+ q_num, entry);
+
+ } else {
+ if (cnt++ < batch_log)
+ dev_info(dev,
+ "Could not find MsgId[0x%x] for resp func %d bmap %d\n",
+ msg_id, data->function_id,
+ test_bit(msg_id, vk->bmap));
+ kfree(data);
+ }
+ /* Fetch wr_idx to handle more back-to-back events */
+ wr_idx = readl(&msgq->wr_idx);
+
+			/*
+			 * Cap the maximum so that, even while trying to handle
+			 * more back-to-back events, we don't hold the CPU too
+			 * long and corrupted rd/wr indexes cannot trigger
+			 * infinite looping.
+			 */
+ if (++msg_processed >= max_msg_to_process) {
+ dev_warn(dev, "Q[%d] Per loop processing exceeds %d\n",
+ q_num, max_msg_to_process);
+ exit_loop = true;
+ }
+ }
+ }
+idx_err:
+ mutex_unlock(&chan->msgq_mutex);
+ dev_dbg(dev, "total %d drained from queues\n", total);
+
+ return total;
+}
+
+/*
+ * init routine for all required data structures
+ */
+static int bcm_vk_data_init(struct bcm_vk *vk)
+{
+ int i;
+
+ spin_lock_init(&vk->ctx_lock);
+ for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
+ vk->ctx[i].in_use = false;
+ vk->ctx[i].idx = i; /* self identity */
+ vk->ctx[i].miscdev = NULL;
+ }
+ spin_lock_init(&vk->msg_id_lock);
+ spin_lock_init(&vk->host_alert_lock);
+ vk->msg_id = 0;
+
+ /* initialize hash table */
+ for (i = 0; i < VK_PID_HT_SZ; i++)
+ INIT_LIST_HEAD(&vk->pid_ht[i].head);
+
+ return 0;
+}
+
+irqreturn_t bcm_vk_msgq_irqhandler(int irq, void *dev_id)
+{
+ struct bcm_vk *vk = dev_id;
+
+ if (!bcm_vk_drv_access_ok(vk)) {
+ dev_err(&vk->pdev->dev,
+ "Interrupt %d received when msgq not inited\n", irq);
+ goto skip_schedule_work;
+ }
+
+ queue_work(vk->wq_thread, &vk->wq_work);
+
+skip_schedule_work:
+ return IRQ_HANDLED;
+}
+
+int bcm_vk_open(struct inode *inode, struct file *p_file)
+{
+ struct bcm_vk_ctx *ctx;
+ struct miscdevice *miscdev = (struct miscdevice *)p_file->private_data;
+ struct bcm_vk *vk = container_of(miscdev, struct bcm_vk, miscdev);
+ struct device *dev = &vk->pdev->dev;
+ int rc = 0;
+
+ /* get a context and set it up for file */
+ ctx = bcm_vk_get_ctx(vk, task_tgid_nr(current));
+ if (!ctx) {
+ dev_err(dev, "Error allocating context\n");
+ rc = -ENOMEM;
+ } else {
+		/*
+		 * Set up the context and replace the private data with it for
+		 * other methods to use. The context exists because multiple
+		 * sessions may open the device node, and when the upper layer
+		 * queries for responses on a given open, only those tied to
+		 * that specific open should be returned. The context->idx is
+		 * used for this binding.
+		 */
+ ctx->miscdev = miscdev;
+ p_file->private_data = ctx;
+ dev_dbg(dev, "ctx_returned with idx %d, pid %d\n",
+ ctx->idx, ctx->pid);
+ }
+ return rc;
+}
+
+ssize_t bcm_vk_read(struct file *p_file,
+ char __user *buf,
+ size_t count,
+ loff_t *f_pos)
+{
+ ssize_t rc = -ENOMSG;
+ struct bcm_vk_ctx *ctx = p_file->private_data;
+ struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
+ miscdev);
+ struct device *dev = &vk->pdev->dev;
+ struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
+ struct bcm_vk_wkent *entry = NULL;
+ u32 q_num;
+ u32 rsp_length;
+ bool found = false;
+
+ if (!bcm_vk_drv_access_ok(vk))
+ return -EPERM;
+
+ dev_dbg(dev, "Buf count %zu\n", count);
+ found = false;
+
+	/*
+	 * search through the pendq on the to_h chan, and return only those
+	 * that belong to the same context. The search always goes from the
+	 * high- to the low-priority queues
+	 */
+ spin_lock(&chan->pendq_lock);
+ for (q_num = 0; q_num < chan->q_nr; q_num++) {
+ list_for_each_entry(entry, &chan->pendq[q_num], node) {
+ if (entry->ctx->idx == ctx->idx) {
+ if (count >=
+ (entry->to_h_blks * VK_MSGQ_BLK_SIZE)) {
+ list_del(&entry->node);
+ atomic_dec(&ctx->pend_cnt);
+ found = true;
+ } else {
+ /* buffer not big enough */
+ rc = -EMSGSIZE;
+ }
+ goto read_loop_exit;
+ }
+ }
+ }
+read_loop_exit:
+ spin_unlock(&chan->pendq_lock);
+
+ if (found) {
+ /* retrieve the passed down msg_id */
+ set_msg_id(&entry->to_h_msg[0], entry->usr_msg_id);
+ rsp_length = entry->to_h_blks * VK_MSGQ_BLK_SIZE;
+ if (copy_to_user(buf, entry->to_h_msg, rsp_length) == 0)
+ rc = rsp_length;
+
+ bcm_vk_free_wkent(dev, entry);
+ } else if (rc == -EMSGSIZE) {
+ struct vk_msg_blk tmp_msg = entry->to_h_msg[0];
+
+		/*
+		 * in this case, return just the first block, so
+		 * that the app knows what size it is looking for.
+		 */
+ set_msg_id(&tmp_msg, entry->usr_msg_id);
+ tmp_msg.size = entry->to_h_blks - 1;
+ if (copy_to_user(buf, &tmp_msg, VK_MSGQ_BLK_SIZE) != 0) {
+ dev_err(dev, "Error return 1st block in -EMSGSIZE\n");
+ rc = -EFAULT;
+ }
+ }
+ return rc;
+}
+
+ssize_t bcm_vk_write(struct file *p_file,
+ const char __user *buf,
+ size_t count,
+ loff_t *f_pos)
+{
+ ssize_t rc;
+ struct bcm_vk_ctx *ctx = p_file->private_data;
+ struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
+ miscdev);
+ struct bcm_vk_msgq __iomem *msgq;
+ struct device *dev = &vk->pdev->dev;
+ struct bcm_vk_wkent *entry;
+ u32 sgl_extra_blks;
+ u32 q_num;
+ u32 msg_size;
+ u32 msgq_size;
+
+ if (!bcm_vk_drv_access_ok(vk))
+ return -EPERM;
+
+ dev_dbg(dev, "Msg count %zu\n", count);
+
+	/* first, sanity check: count must be a multiple of the basic blk */
+ if (count & (VK_MSGQ_BLK_SIZE - 1)) {
+ dev_err(dev, "Failure with size %zu not multiple of %zu\n",
+ count, VK_MSGQ_BLK_SIZE);
+ rc = -EINVAL;
+ goto write_err;
+ }
+
+ /* allocate the work entry + buffer for size count and inband sgl */
+ entry = kzalloc(sizeof(*entry) + count + vk->ib_sgl_size,
+ GFP_KERNEL);
+ if (!entry) {
+ rc = -ENOMEM;
+ goto write_err;
+ }
+
+ /* now copy msg from user space, and then formulate the work entry */
+ if (copy_from_user(&entry->to_v_msg[0], buf, count)) {
+ rc = -EFAULT;
+ goto write_free_ent;
+ }
+
+ entry->to_v_blks = count >> VK_MSGQ_BLK_SZ_SHIFT;
+ entry->ctx = ctx;
+
+	/* check the blk size, which must not exceed the queue space */
+ q_num = get_q_num(&entry->to_v_msg[0]);
+ msgq = vk->to_v_msg_chan.msgq[q_num];
+ msgq_size = readl_relaxed(&msgq->size);
+ if (entry->to_v_blks + (vk->ib_sgl_size >> VK_MSGQ_BLK_SZ_SHIFT)
+ > (msgq_size - 1)) {
+ dev_err(dev, "Blk size %d exceed max queue size allowed %d\n",
+ entry->to_v_blks, msgq_size - 1);
+ rc = -EINVAL;
+ goto write_free_ent;
+ }
+
+ /* Use internal message id */
+ entry->usr_msg_id = get_msg_id(&entry->to_v_msg[0]);
+ rc = bcm_vk_get_msg_id(vk);
+ if (rc == VK_MSG_ID_OVERFLOW) {
+ dev_err(dev, "msg_id overflow\n");
+ rc = -EOVERFLOW;
+ goto write_free_ent;
+ }
+ set_msg_id(&entry->to_v_msg[0], rc);
+ ctx->q_num = q_num;
+
+ dev_dbg(dev,
+ "[Q-%d]Message ctx id %d, usr_msg_id 0x%x sent msg_id 0x%x\n",
+ ctx->q_num, ctx->idx, entry->usr_msg_id,
+ get_msg_id(&entry->to_v_msg[0]));
+
+ if (entry->to_v_msg[0].function_id == VK_FID_TRANS_BUF) {
+ /* Convert any pointers to sg list */
+ unsigned int num_planes;
+ int dir;
+ struct _vk_data *data;
+
+		/*
+		 * If we are in reset, no buffer transfer is allowed;
+		 * return an error.
+		 */
+ if (vk->reset_pid) {
+ dev_dbg(dev, "No Transfer allowed during reset, pid %d.\n",
+ ctx->pid);
+ rc = -EACCES;
+ goto write_free_msgid;
+ }
+
+ num_planes = entry->to_v_msg[0].cmd & VK_CMD_PLANES_MASK;
+ if ((entry->to_v_msg[0].cmd & VK_CMD_MASK) == VK_CMD_DOWNLOAD)
+ dir = DMA_FROM_DEVICE;
+ else
+ dir = DMA_TO_DEVICE;
+
+ /* Calculate vk_data location */
+ /* Go to end of the message */
+ msg_size = entry->to_v_msg[0].size;
+ if (msg_size > entry->to_v_blks) {
+ rc = -EMSGSIZE;
+ goto write_free_msgid;
+ }
+
+ data = (struct _vk_data *)&entry->to_v_msg[msg_size + 1];
+
+ /* Now back up to the start of the pointers */
+ data -= num_planes;
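+		/*
+		 * Layout illustration with assumed numbers: for a message of
+		 * size 4 (five blocks including the header) carrying two
+		 * planes, "data" first points one block past the message end
+		 * (&to_v_msg[5]) and then backs up two _vk_data descriptors
+		 * to the start of the pointer array at the message tail.
+		 */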
+
+ /* Convert user addresses to DMA SG List */
+ rc = bcm_vk_sg_alloc(dev, entry->dma, dir, data, num_planes);
+ if (rc)
+ goto write_free_msgid;
+
+ atomic_inc(&ctx->dma_cnt);
+ /* try to embed inband sgl */
+ sgl_extra_blks = bcm_vk_append_ib_sgl(vk, entry, data,
+ num_planes);
+ entry->to_v_blks += sgl_extra_blks;
+ entry->to_v_msg[0].size += sgl_extra_blks;
+ } else if (entry->to_v_msg[0].function_id == VK_FID_INIT &&
+ entry->to_v_msg[0].context_id == VK_NEW_CTX) {
+		/*
+		 * Init happens in 2 stages; only the first stage contains
+		 * the pid that needs translating.
+		 */
+ pid_t org_pid, pid;
+
+		/*
+		 * translate the pid into the unique host space, as the user
+		 * may run sessions inside containers or process
+		 * namespaces.
+		 */
+#define VK_MSG_PID_MASK 0xffffff00
+#define VK_MSG_PID_SH 8
+ org_pid = (entry->to_v_msg[0].arg & VK_MSG_PID_MASK)
+ >> VK_MSG_PID_SH;
+
+ pid = task_tgid_nr(current);
+ entry->to_v_msg[0].arg =
+ (entry->to_v_msg[0].arg & ~VK_MSG_PID_MASK) |
+ (pid << VK_MSG_PID_SH);
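+		/*
+		 * Worked example with assumed values: an app in a pid
+		 * namespace sends arg 0x00012300 (its local pid 0x123 in
+		 * bits [31:8]); with a host-side tgid of 0x456 the arg is
+		 * rewritten to 0x00045600, while bits [7:0] are preserved.
+		 */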
+ if (org_pid != pid)
+ dev_dbg(dev, "In PID 0x%x(%d), converted PID 0x%x(%d)\n",
+ org_pid, org_pid, pid, pid);
+ }
+
+ /*
+ * store work entry to pending queue until a response is received.
+ * This needs to be done before enqueuing the message
+ */
+ bcm_vk_append_pendq(&vk->to_v_msg_chan, q_num, entry);
+
+ rc = bcm_to_v_msg_enqueue(vk, entry);
+ if (rc) {
+ dev_err(dev, "Fail to enqueue msg to to_v queue\n");
+
+ /* remove message from pending list */
+ entry = bcm_vk_dequeue_pending
+ (vk,
+ &vk->to_v_msg_chan,
+ q_num,
+ get_msg_id(&entry->to_v_msg[0]));
+ goto write_free_ent;
+ }
+
+ return count;
+
+write_free_msgid:
+ bcm_vk_msgid_bitmap_clear(vk, get_msg_id(&entry->to_v_msg[0]), 1);
+write_free_ent:
+ kfree(entry);
+write_err:
+ return rc;
+}
+
+__poll_t bcm_vk_poll(struct file *p_file, struct poll_table_struct *wait)
+{
+ __poll_t ret = 0;
+ int cnt;
+ struct bcm_vk_ctx *ctx = p_file->private_data;
+ struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
+ struct device *dev = &vk->pdev->dev;
+
+ poll_wait(p_file, &ctx->rd_wq, wait);
+
+ cnt = atomic_read(&ctx->pend_cnt);
+ if (cnt) {
+ ret = (__force __poll_t)(POLLIN | POLLRDNORM);
+ if (cnt < 0) {
+ dev_err(dev, "Error cnt %d, setting back to 0", cnt);
+ atomic_set(&ctx->pend_cnt, 0);
+ }
+ }
+
+ return ret;
+}
+
+int bcm_vk_release(struct inode *inode, struct file *p_file)
+{
+ int ret;
+ struct bcm_vk_ctx *ctx = p_file->private_data;
+ struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
+ struct device *dev = &vk->pdev->dev;
+ pid_t pid = ctx->pid;
+ int dma_cnt;
+ unsigned long timeout, start_time;
+
+	/*
+	 * If there are outstanding DMA transactions, we need to delay long
+	 * enough to ensure that the card side has stopped touching the host
+	 * buffer and its SGL list. A race condition could happen if the host
+	 * app is killed abruptly, e.g. kill -9, while some DMA transfer
+	 * orders are still in flight. Nothing can be done except delay, as
+	 * the host side runs in a completely async fashion.
+	 */
+ start_time = jiffies;
+ timeout = start_time + msecs_to_jiffies(BCM_VK_DMA_DRAIN_MAX_MS);
+ do {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(dev, "%d dma still pending for [fd-%d] pid %d\n",
+ dma_cnt, ctx->idx, pid);
+ break;
+ }
+ dma_cnt = atomic_read(&ctx->dma_cnt);
+ cpu_relax();
+ cond_resched();
+ } while (dma_cnt);
+ dev_dbg(dev, "Draining for [fd-%d] pid %d - delay %d ms\n",
+ ctx->idx, pid, jiffies_to_msecs(jiffies - start_time));
+
+ bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, ctx);
+ bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, ctx);
+
+ ret = bcm_vk_free_ctx(vk, ctx);
+ if (ret == 0)
+ ret = bcm_vk_handle_last_sess(vk, pid, ctx->q_num);
+ else
+ ret = 0;
+
+ kref_put(&vk->kref, bcm_vk_release_data);
+
+ return ret;
+}
+
+int bcm_vk_msg_init(struct bcm_vk *vk)
+{
+ struct device *dev = &vk->pdev->dev;
+ int ret;
+
+ if (bcm_vk_data_init(vk)) {
+ dev_err(dev, "Error initializing internal data structures\n");
+ return -EINVAL;
+ }
+
+ if (bcm_vk_msg_chan_init(&vk->to_v_msg_chan) ||
+ bcm_vk_msg_chan_init(&vk->to_h_msg_chan)) {
+ dev_err(dev, "Error initializing communication channel\n");
+ return -EIO;
+ }
+
+ /* read msgq info if ready */
+ ret = bcm_vk_sync_msgq(vk, false);
+ if (ret && (ret != -EAGAIN)) {
+ dev_err(dev, "Error reading comm msg Q info\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void bcm_vk_msg_remove(struct bcm_vk *vk)
+{
+ bcm_vk_blk_drv_access(vk);
+
+ /* drain all pending items */
+ bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
+ bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);
+}
+
diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.h b/drivers/misc/bcm-vk/bcm_vk_msg.h
new file mode 100644
index 000000000000..4eaad84825d6
--- /dev/null
+++ b/drivers/misc/bcm-vk/bcm_vk_msg.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+
+#ifndef BCM_VK_MSG_H
+#define BCM_VK_MSG_H
+
+#include <uapi/linux/misc/bcm_vk.h>
+#include "bcm_vk_sg.h"
+
+/* Single message queue control structure */
+struct bcm_vk_msgq {
+ u16 type; /* queue type */
+ u16 num; /* queue number */
+ u32 start; /* offset in BAR1 where the queue memory starts */
+
+ u32 rd_idx; /* read idx */
+ u32 wr_idx; /* write idx */
+
+ u32 size; /*
+ * size, which is in number of 16byte blocks,
+ * to align with the message data structure.
+ */
+ u32 nxt; /*
+ * offset to the next msg queue struct.
+ * This is to provide flexibility for alignment purposes.
+ */
+
+/* Least significant 16 bits in below field hold doorbell register offset */
+#define DB_SHIFT 16
+
+ u32 db_offset; /* queue doorbell register offset in BAR0 */
+
+ u32 rsvd;
+};
+
+/*
+ * Structure to record static info from the msgq sync. We keep a local
+ * copy of some of these variables for both performance and checking
+ * purposes.
+ */
+struct bcm_vk_sync_qinfo {
+ void __iomem *q_start;
+ u32 q_size;
+ u32 q_mask;
+ u32 q_low;
+ u32 q_db_offset;
+};
+
+#define VK_MSGQ_MAX_NR 4 /* Maximum number of message queues */
+
+/*
+ * message block - the basic unit of a message; a message's size is always
+ * N x sizeof(struct vk_msg_blk)
+ */
+struct vk_msg_blk {
+ u8 function_id;
+#define VK_FID_TRANS_BUF 5
+#define VK_FID_SHUTDOWN 8
+#define VK_FID_INIT 9
+ u8 size; /* size of the message in number of vk_msg_blk's */
+ u16 trans_id; /* transport id, queue & msg_id */
+ u32 context_id;
+#define VK_NEW_CTX 0
+ u32 cmd;
+#define VK_CMD_PLANES_MASK 0x000f /* number of planes to up/download */
+#define VK_CMD_UPLOAD 0x0400 /* memory transfer to vk */
+#define VK_CMD_DOWNLOAD 0x0500 /* memory transfer from vk */
+#define VK_CMD_MASK 0x0f00 /* command mask */
+ u32 arg;
+};
+
+/* vk_msg_blk is 16 bytes fixed */
+#define VK_MSGQ_BLK_SIZE (sizeof(struct vk_msg_blk))
+/* shift for fast division of basic msg blk size */
+#define VK_MSGQ_BLK_SZ_SHIFT 4
+
+/* use msg_id 0 for any simplex host2vk communication */
+#define VK_SIMPLEX_MSG_ID 0
+
+/* context per open session of the device node */
+struct bcm_vk_ctx {
+ struct list_head node; /* use for linkage in Hash Table */
+ unsigned int idx;
+ bool in_use;
+ pid_t pid;
+ u32 hash_idx;
+ u32 q_num; /* queue number used by the stream */
+ struct miscdevice *miscdev;
+ atomic_t pend_cnt; /* number of items pending to be read by the host */
+ atomic_t dma_cnt; /* number of outstanding DMA transactions */
+ wait_queue_head_t rd_wq;
+};
+
+/* pid hash table entry */
+struct bcm_vk_ht_entry {
+ struct list_head head;
+};
+
+#define VK_DMA_MAX_ADDRS 4 /* Max 4 DMA Addresses */
+/* structure for housekeeping a single work entry */
+struct bcm_vk_wkent {
+ struct list_head node; /* for linking purpose */
+ struct bcm_vk_ctx *ctx;
+
+ /* Store up to 4 dma pointers */
+ struct bcm_vk_dma dma[VK_DMA_MAX_ADDRS];
+
+ u32 to_h_blks; /* response */
+ struct vk_msg_blk *to_h_msg;
+
+ /*
+ * put the to_v_msg at the end so that we can simply append the to_v
+ * msg to the end of the allocated block
+ */
+ u32 usr_msg_id;
+ u32 to_v_blks;
+ u32 seq_num;
+ struct vk_msg_blk to_v_msg[];
+};
+
+/* queue stats counters */
+struct bcm_vk_qs_cnts {
+ u32 cnt; /* general counter, used to limit output */
+ u32 acc_sum;
+ u32 max_occ; /* max during a sampling period */
+ u32 max_abs; /* the abs max since reset */
+};
+
+/* control channel structure for either to_v or to_h communication */
+struct bcm_vk_msg_chan {
+ u32 q_nr;
+ /* Mutex to access msgq */
+ struct mutex msgq_mutex;
+ /* pointing to BAR locations */
+ struct bcm_vk_msgq __iomem *msgq[VK_MSGQ_MAX_NR];
+ /* Spinlock to access pending queue */
+ spinlock_t pendq_lock;
+ /* for temporary storing pending items, one for each queue */
+ struct list_head pendq[VK_MSGQ_MAX_NR];
+ /* static queue info from the sync */
+ struct bcm_vk_sync_qinfo sync_qinfo[VK_MSGQ_MAX_NR];
+};
+
+/* total number of message queues allowed by the driver */
+#define VK_MSGQ_PER_CHAN_MAX 3
+#define VK_MSGQ_NUM_DEFAULT (VK_MSGQ_PER_CHAN_MAX - 1)
+
+/* total number of supported ctx, 32 ctx each for 5 components */
+#define VK_CMPT_CTX_MAX (32 * 5)
+
+/* hash table defines to store the opened FDs */
+#define VK_PID_HT_SHIFT_BIT 7 /* 128 */
+#define VK_PID_HT_SZ BIT(VK_PID_HT_SHIFT_BIT)
+
+/* The following are offsets of DDR info provided by the vk card */
+#define VK_BAR0_SEG_SIZE (4 * SZ_1K) /* segment size for BAR0 */
+
+/* shutdown types supported */
+#define VK_SHUTDOWN_PID 1
+#define VK_SHUTDOWN_GRACEFUL 2
+
+#endif
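The rd_idx/wr_idx pair in struct bcm_vk_msgq indexes a ring measured in 16-byte vk_msg_blk units, and bcm_vk_sync_qinfo caches a q_mask alongside q_size, which suggests a power-of-two ring. A small sketch of the implied index arithmetic, assuming q_mask == q_size - 1 and the common convention of keeping one slot unused so that full and empty are distinguishable:

#include <stdint.h>
#include <stdio.h>

static uint32_t msgq_occupied(uint32_t wr_idx, uint32_t rd_idx, uint32_t q_mask)
{
	return (wr_idx - rd_idx) & q_mask;
}

static uint32_t msgq_avail_space(uint32_t wr_idx, uint32_t rd_idx,
				 uint32_t q_size, uint32_t q_mask)
{
	/* keep one block unused so full and empty stay distinguishable */
	return q_size - msgq_occupied(wr_idx, rd_idx, q_mask) - 1;
}

int main(void)
{
	uint32_t q_size = 64;	/* in 16-byte vk_msg_blk units */
	uint32_t q_mask = q_size - 1;

	printf("occupied=%u free=%u\n",
	       (unsigned)msgq_occupied(70, 10, q_mask),
	       (unsigned)msgq_avail_space(70, 10, q_size, q_mask));
	return 0;
}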
diff --git a/drivers/misc/bcm-vk/bcm_vk_sg.c b/drivers/misc/bcm-vk/bcm_vk_sg.c
new file mode 100644
index 000000000000..2e9daaf3e492
--- /dev/null
+++ b/drivers/misc/bcm-vk/bcm_vk_sg.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/pgtable.h>
+#include <linux/vmalloc.h>
+
+#include <asm/page.h>
+#include <asm/unaligned.h>
+
+#include <uapi/linux/misc/bcm_vk.h>
+
+#include "bcm_vk.h"
+#include "bcm_vk_msg.h"
+#include "bcm_vk_sg.h"
+
+/*
+ * Valkyrie has a hardware limitation of 16M transfer size.
+ * So limit the SGL chunks to 16M.
+ */
+#define BCM_VK_MAX_SGL_CHUNK SZ_16M
+
+static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma);
+
+/* Uncomment to dump SGLIST */
+/* #define BCM_VK_DUMP_SGLIST */
+
+static int bcm_vk_dma_alloc(struct device *dev,
+ struct bcm_vk_dma *dma,
+ int direction,
+ struct _vk_data *vkdata)
+{
+ dma_addr_t addr, sg_addr;
+ int err;
+ int i;
+ int offset;
+ u32 size;
+ u32 remaining_size;
+ u32 transfer_size;
+ u64 data;
+ unsigned long first, last;
+ struct _vk_data *sgdata;
+
+ /* Get 64-bit user address */
+ data = get_unaligned(&vkdata->address);
+
+ /* offset into first page */
+ offset = offset_in_page(data);
+
+ /* Calculate number of pages */
+ first = (data & PAGE_MASK) >> PAGE_SHIFT;
+ last = ((data + vkdata->size - 1) & PAGE_MASK) >> PAGE_SHIFT;
+ dma->nr_pages = last - first + 1;
+
+ /* Allocate DMA pages */
+ dma->pages = kmalloc_array(dma->nr_pages,
+ sizeof(struct page *),
+ GFP_KERNEL);
+ if (!dma->pages)
+ return -ENOMEM;
+
+ dev_dbg(dev, "Alloc DMA Pages [0x%llx+0x%x => %d pages]\n",
+ data, vkdata->size, dma->nr_pages);
+
+ dma->direction = direction;
+
+ /* Get user pages into memory */
+ err = get_user_pages_fast(data & PAGE_MASK,
+ dma->nr_pages,
+ direction == DMA_FROM_DEVICE,
+ dma->pages);
+ if (err != dma->nr_pages) {
+ dma->nr_pages = (err >= 0) ? err : 0;
+ dev_err(dev, "get_user_pages_fast, err=%d [%d]\n",
+ err, dma->nr_pages);
+ return err < 0 ? err : -EINVAL;
+ }
+
+ /* Max size of sg list: one entry per mapped page plus header fields at start */
+ dma->sglen = (dma->nr_pages * sizeof(*sgdata)) +
+ (sizeof(u32) * SGLIST_VKDATA_START);
+
+ /* Allocate sglist */
+ dma->sglist = dma_alloc_coherent(dev,
+ dma->sglen,
+ &dma->handle,
+ GFP_KERNEL);
+ if (!dma->sglist)
+ return -ENOMEM;
+
+ dma->sglist[SGLIST_NUM_SG] = 0;
+ dma->sglist[SGLIST_TOTALSIZE] = vkdata->size;
+ remaining_size = vkdata->size;
+ sgdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];
+
+ /* Map all pages into DMA */
+ size = min_t(size_t, PAGE_SIZE - offset, remaining_size);
+ remaining_size -= size;
+ sg_addr = dma_map_page(dev,
+ dma->pages[0],
+ offset,
+ size,
+ dma->direction);
+ transfer_size = size;
+ if (unlikely(dma_mapping_error(dev, sg_addr))) {
+ __free_page(dma->pages[0]);
+ return -EIO;
+ }
+
+ for (i = 1; i < dma->nr_pages; i++) {
+ size = min_t(size_t, PAGE_SIZE, remaining_size);
+ remaining_size -= size;
+ addr = dma_map_page(dev,
+ dma->pages[i],
+ 0,
+ size,
+ dma->direction);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ __free_page(dma->pages[i]);
+ return -EIO;
+ }
+
+ /*
+ * Compress an SG list entry when the pages are contiguous
+ * and the combined size is at most BCM_VK_MAX_SGL_CHUNK
+ */
+ if ((addr == (sg_addr + transfer_size)) &&
+ ((transfer_size + size) <= BCM_VK_MAX_SGL_CHUNK)) {
+ /* pages are contiguous, add to same sg entry */
+ transfer_size += size;
+ } else {
+ /* pages are not contiguous, write sg entry */
+ sgdata->size = transfer_size;
+ put_unaligned(sg_addr, (u64 *)&sgdata->address);
+ dma->sglist[SGLIST_NUM_SG]++;
+
+ /* start new sg entry */
+ sgdata++;
+ sg_addr = addr;
+ transfer_size = size;
+ }
+ }
+ /* Write last sg list entry */
+ sgdata->size = transfer_size;
+ put_unaligned(sg_addr, (u64 *)&sgdata->address);
+ dma->sglist[SGLIST_NUM_SG]++;
+
+ /* Update pointers and size field to point to sglist */
+ put_unaligned((u64)dma->handle, &vkdata->address);
+ vkdata->size = (dma->sglist[SGLIST_NUM_SG] * sizeof(*sgdata)) +
+ (sizeof(u32) * SGLIST_VKDATA_START);
+
+#ifdef BCM_VK_DUMP_SGLIST
+ dev_dbg(dev,
+ "sgl 0x%llx handle 0x%llx, sglen: 0x%x sgsize: 0x%x\n",
+ (u64)dma->sglist,
+ dma->handle,
+ dma->sglen,
+ vkdata->size);
+ for (i = 0; i < vkdata->size / sizeof(u32); i++)
+ dev_dbg(dev, "i:0x%x 0x%x\n", i, dma->sglist[i]);
+#endif
+
+ return 0;
+}
+
+int bcm_vk_sg_alloc(struct device *dev,
+ struct bcm_vk_dma *dma,
+ int dir,
+ struct _vk_data *vkdata,
+ int num)
+{
+ int i;
+ int rc = -EINVAL;
+
+ /* Convert user addresses to DMA SG List */
+ for (i = 0; i < num; i++) {
+ if (vkdata[i].size && vkdata[i].address) {
+ /*
+ * If both size and address are non-zero
+ * then DMA alloc.
+ */
+ rc = bcm_vk_dma_alloc(dev,
+ &dma[i],
+ dir,
+ &vkdata[i]);
+ } else if (vkdata[i].size ||
+ vkdata[i].address) {
+ /*
+ * If only one of size and address is zero
+ * there is a problem.
+ */
+ dev_err(dev,
+ "Invalid vkdata %x 0x%x 0x%llx\n",
+ i, vkdata[i].size, vkdata[i].address);
+ rc = -EINVAL;
+ } else {
+ /*
+ * If size and address are both zero
+ * don't convert, but return success.
+ */
+ rc = 0;
+ }
+
+ if (rc)
+ goto fail_alloc;
+ }
+ return rc;
+
+fail_alloc:
+ while (i > 0) {
+ i--;
+ if (dma[i].sglist)
+ bcm_vk_dma_free(dev, &dma[i]);
+ }
+ return rc;
+}
+
+static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma)
+{
+ dma_addr_t addr;
+ int i;
+ int num_sg;
+ u32 size;
+ struct _vk_data *vkdata;
+
+ dev_dbg(dev, "free sglist=%p sglen=0x%x\n", dma->sglist, dma->sglen);
+
+ /* Unmap all pages in the sglist */
+ num_sg = dma->sglist[SGLIST_NUM_SG];
+ vkdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];
+ for (i = 0; i < num_sg; i++) {
+ size = vkdata[i].size;
+ addr = get_unaligned(&vkdata[i].address);
+
+ dma_unmap_page(dev, addr, size, dma->direction);
+ }
+
+ /* Free allocated sglist */
+ dma_free_coherent(dev, dma->sglen, dma->sglist, dma->handle);
+
+ /* Release lock on all pages */
+ for (i = 0; i < dma->nr_pages; i++)
+ put_page(dma->pages[i]);
+
+ /* Free allocated dma pages */
+ kfree(dma->pages);
+ dma->sglist = NULL;
+
+ return 0;
+}
+
+int bcm_vk_sg_free(struct device *dev, struct bcm_vk_dma *dma, int num,
+ int *proc_cnt)
+{
+ int i;
+
+ *proc_cnt = 0;
+ /* Unmap and free all pages and sglists */
+ for (i = 0; i < num; i++) {
+ if (dma[i].sglist) {
+ bcm_vk_dma_free(dev, &dma[i]);
+ *proc_cnt += 1;
+ }
+ }
+
+ return 0;
+}
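The compression step inside bcm_vk_dma_alloc() can be seen in isolation: consecutive pages whose mapped bus addresses are contiguous are folded into one SG entry, capped at BCM_VK_MAX_SGL_CHUNK. A standalone sketch of that merge rule, with made-up page addresses:

#include <stdint.h>
#include <stdio.h>

#define MAX_SGL_CHUNK	(16u * 1024 * 1024)	/* BCM_VK_MAX_SGL_CHUNK */
#define PAGE_SZ		4096u

int main(void)
{
	/* bus addresses of five mapped pages; values are made up */
	uint64_t page_addr[] = { 0x1000, 0x2000, 0x3000, 0x9000, 0xa000 };
	int nr = sizeof(page_addr) / sizeof(page_addr[0]);
	uint64_t sg_addr = page_addr[0];
	uint32_t transfer_size = PAGE_SZ;
	int i, num_sg = 0;

	for (i = 1; i < nr; i++) {
		if (page_addr[i] == sg_addr + transfer_size &&
		    transfer_size + PAGE_SZ <= MAX_SGL_CHUNK) {
			transfer_size += PAGE_SZ;	/* contiguous: merge */
		} else {
			printf("sg[%d]: addr=0x%llx size=0x%x\n", num_sg++,
			       (unsigned long long)sg_addr,
			       (unsigned)transfer_size);
			sg_addr = page_addr[i];
			transfer_size = PAGE_SZ;
		}
	}
	/* flush the last entry, as bcm_vk_dma_alloc() does after its loop */
	printf("sg[%d]: addr=0x%llx size=0x%x\n", num_sg,
	       (unsigned long long)sg_addr, (unsigned)transfer_size);
	return 0;	/* prints sg[0]=0x1000/0x3000 and sg[1]=0x9000/0x2000 */
}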
diff --git a/drivers/misc/bcm-vk/bcm_vk_sg.h b/drivers/misc/bcm-vk/bcm_vk_sg.h
new file mode 100644
index 000000000000..81b3d0976ddb
--- /dev/null
+++ b/drivers/misc/bcm-vk/bcm_vk_sg.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+
+#ifndef BCM_VK_SG_H
+#define BCM_VK_SG_H
+
+#include <linux/dma-mapping.h>
+
+struct bcm_vk_dma {
+ /* for userland buffer */
+ struct page **pages;
+ int nr_pages;
+
+ /* common */
+ dma_addr_t handle;
+ /*
+ * sglist is of the following LE format
+ * [U32] num_sg = number of sg addresses (N)
+ * [U32] totalsize = totalsize of data being transferred in sglist
+ * [U32] size[0] = size of data in address0
+ * [U32] addr_l[0] = lower 32-bits of address0
+ * [U32] addr_h[0] = higher 32-bits of address0
+ * ..
+ * [U32] size[N-1] = size of data in addressN-1
+ * [U32] addr_l[N-1] = lower 32-bits of addressN-1
+ * [U32] addr_h[N-1] = higher 32-bits of addressN-1
+ */
+ u32 *sglist;
+#define SGLIST_NUM_SG 0
+#define SGLIST_TOTALSIZE 1
+#define SGLIST_VKDATA_START 2
+
+ int sglen; /* Length (bytes) of sglist */
+ int direction;
+};
+
+struct _vk_data {
+ u32 size; /* data size in bytes */
+ u64 address; /* Pointer to data */
+} __packed;
+
+/*
+ * Scatter-gather DMA buffer API.
+ *
+ * These functions provide a simple way to create a page list and a
+ * scatter-gather list from a userspace address and map the memory
+ * for DMA operations.
+ */
+int bcm_vk_sg_alloc(struct device *dev,
+ struct bcm_vk_dma *dma,
+ int dir,
+ struct _vk_data *vkdata,
+ int num);
+
+int bcm_vk_sg_free(struct device *dev, struct bcm_vk_dma *dma, int num,
+ int *proc_cnt);
+
+#endif
+
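The little-endian u32 layout documented for sglist above is plain index arithmetic: two header words followed by {size, addr_l, addr_h} triplets. A sketch of packing it, illustrative only, since the driver itself writes through put_unaligned() into a dma_alloc_coherent() buffer and this assumes a little-endian host:

#include <stdint.h>
#include <stdio.h>

static void pack_entry(uint32_t *sgl, int idx, uint32_t size, uint64_t addr)
{
	uint32_t *e = &sgl[2 + idx * 3];	/* 2 == SGLIST_VKDATA_START */

	e[0] = size;			/* size[idx] */
	e[1] = (uint32_t)addr;		/* addr_l[idx]: lower 32 bits */
	e[2] = (uint32_t)(addr >> 32);	/* addr_h[idx]: upper 32 bits */
}

int main(void)
{
	uint32_t sgl[2 + 2 * 3] = { 0 };

	pack_entry(sgl, 0, 0x3000, 0x100000000ULL);
	pack_entry(sgl, 1, 0x2000, 0x9000);
	sgl[0] = 2;		/* SGLIST_NUM_SG */
	sgl[1] = 0x5000;	/* SGLIST_TOTALSIZE */

	printf("num_sg=%u totalsize=0x%x\n", (unsigned)sgl[0],
	       (unsigned)sgl[1]);
	return 0;
}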
diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c
new file mode 100644
index 000000000000..4d02692ecfc7
--- /dev/null
+++ b/drivers/misc/bcm-vk/bcm_vk_tty.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include "bcm_vk.h"
+
+/* TTYVK base offset is 0x300000 into BAR1 */
+#define BAR1_TTYVK_BASE_OFFSET 0x300000
+/* Each TTYVK channel (TO or FROM) is 0x100000 */
+#define BAR1_TTYVK_CHAN_OFFSET 0x100000
+/* Each TTYVK channel has TO and FROM, hence the * 2 */
+#define BAR1_TTYVK_BASE(index) (BAR1_TTYVK_BASE_OFFSET + \
+ ((index) * BAR1_TTYVK_CHAN_OFFSET * 2))
+/* TO TTYVK channel base comes before FROM for each index */
+#define TO_TTYK_BASE(index) BAR1_TTYVK_BASE(index)
+#define FROM_TTYK_BASE(index) (BAR1_TTYVK_BASE(index) + \
+ BAR1_TTYVK_CHAN_OFFSET)
+
+struct bcm_vk_tty_chan {
+ u32 reserved;
+ u32 size;
+ u32 wr;
+ u32 rd;
+ u32 *data;
+};
+
+#define VK_BAR_CHAN(v, DIR, e) ((v)->DIR##_offset \
+ + offsetof(struct bcm_vk_tty_chan, e))
+#define VK_BAR_CHAN_SIZE(v, DIR) VK_BAR_CHAN(v, DIR, size)
+#define VK_BAR_CHAN_WR(v, DIR) VK_BAR_CHAN(v, DIR, wr)
+#define VK_BAR_CHAN_RD(v, DIR) VK_BAR_CHAN(v, DIR, rd)
+#define VK_BAR_CHAN_DATA(v, DIR, off) (VK_BAR_CHAN(v, DIR, data) + (off))
+
+#define VK_BAR0_REGSEG_TTY_DB_OFFSET 0x86c
+
+/* Poll every 1/10 of a second - temporary hack until we use the MSI interrupt */
+#define SERIAL_TIMER_VALUE (HZ / 10)
+
+static void bcm_vk_tty_poll(struct timer_list *t)
+{
+ struct bcm_vk *vk = from_timer(vk, t, serial_timer);
+
+ queue_work(vk->tty_wq_thread, &vk->tty_wq_work);
+ mod_timer(&vk->serial_timer, jiffies + SERIAL_TIMER_VALUE);
+}
+
+irqreturn_t bcm_vk_tty_irqhandler(int irq, void *dev_id)
+{
+ struct bcm_vk *vk = dev_id;
+
+ queue_work(vk->tty_wq_thread, &vk->tty_wq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void bcm_vk_tty_wq_handler(struct work_struct *work)
+{
+ struct bcm_vk *vk = container_of(work, struct bcm_vk, tty_wq_work);
+ struct bcm_vk_tty *vktty;
+ int card_status;
+ int count;
+ unsigned char c;
+ int i;
+ int wr;
+
+ card_status = vkread32(vk, BAR_0, BAR_CARD_STATUS);
+ if (BCM_VK_INTF_IS_DOWN(card_status))
+ return;
+
+ for (i = 0; i < BCM_VK_NUM_TTY; i++) {
+ count = 0;
+ /* Check the card status to confirm the tty channel is ready */
+ if ((card_status & BIT(i)) == 0)
+ continue;
+
+ vktty = &vk->tty[i];
+
+ /* Don't increment read index if tty app is closed */
+ if (!vktty->is_opened)
+ continue;
+
+ /* Fetch the wr offset in buffer from VK */
+ wr = vkread32(vk, BAR_1, VK_BAR_CHAN_WR(vktty, from));
+
+ /* safe to ignore until the BAR read gives a proper size */
+ if (vktty->from_size == 0)
+ continue;
+
+ if (wr >= vktty->from_size) {
+ dev_err(&vk->pdev->dev,
+ "ERROR: wq handler ttyVK%d wr:0x%x > 0x%x\n",
+ i, wr, vktty->from_size);
+ /* Need to signal and close device in this case */
+ continue;
+ }
+
+ /*
+ * Simple read of circular buffer and
+ * insert into tty flip buffer
+ */
+ while (vktty->rd != wr) {
+ c = vkread8(vk, BAR_1,
+ VK_BAR_CHAN_DATA(vktty, from, vktty->rd));
+ vktty->rd++;
+ if (vktty->rd >= vktty->from_size)
+ vktty->rd = 0;
+ tty_insert_flip_char(&vktty->port, c, TTY_NORMAL);
+ count++;
+ }
+
+ if (count) {
+ tty_flip_buffer_push(&vktty->port);
+
+ /* Update read offset from shadow register to card */
+ vkwrite32(vk, vktty->rd, BAR_1,
+ VK_BAR_CHAN_RD(vktty, from));
+ }
+ }
+}
+
+static int bcm_vk_tty_open(struct tty_struct *tty, struct file *file)
+{
+ int card_status;
+ struct bcm_vk *vk;
+ struct bcm_vk_tty *vktty;
+ int index;
+
+ /* initialize the pointer in case something fails */
+ tty->driver_data = NULL;
+
+ vk = dev_get_drvdata(tty->dev);
+ index = tty->index;
+
+ if (index >= BCM_VK_NUM_TTY)
+ return -EINVAL;
+
+ vktty = &vk->tty[index];
+
+ vktty->pid = task_pid_nr(current);
+ vktty->to_offset = TO_TTYK_BASE(index);
+ vktty->from_offset = FROM_TTYK_BASE(index);
+
+ /* Do not allow the tty device to be opened if the card tty is not ready */
+ card_status = vkread32(vk, BAR_0, BAR_CARD_STATUS);
+ if (BCM_VK_INTF_IS_DOWN(card_status) || ((card_status & BIT(index)) == 0))
+ return -EBUSY;
+
+ /*
+ * Get shadow registers of the buffer sizes and the "to" write offset
+ * and "from" read offset
+ */
+ vktty->to_size = vkread32(vk, BAR_1, VK_BAR_CHAN_SIZE(vktty, to));
+ vktty->wr = vkread32(vk, BAR_1, VK_BAR_CHAN_WR(vktty, to));
+ vktty->from_size = vkread32(vk, BAR_1, VK_BAR_CHAN_SIZE(vktty, from));
+ vktty->rd = vkread32(vk, BAR_1, VK_BAR_CHAN_RD(vktty, from));
+ vktty->is_opened = true;
+
+ if (tty->count == 1 && !vktty->irq_enabled) {
+ timer_setup(&vk->serial_timer, bcm_vk_tty_poll, 0);
+ mod_timer(&vk->serial_timer, jiffies + SERIAL_TIMER_VALUE);
+ }
+ return 0;
+}
+
+static void bcm_vk_tty_close(struct tty_struct *tty, struct file *file)
+{
+ struct bcm_vk *vk = dev_get_drvdata(tty->dev);
+
+ if (tty->index >= BCM_VK_NUM_TTY)
+ return;
+
+ vk->tty[tty->index].is_opened = false;
+
+ if (tty->count == 1)
+ del_timer_sync(&vk->serial_timer);
+}
+
+static void bcm_vk_tty_doorbell(struct bcm_vk *vk, u32 db_val)
+{
+ vkwrite32(vk, db_val, BAR_0,
+ VK_BAR0_REGSEG_DB_BASE + VK_BAR0_REGSEG_TTY_DB_OFFSET);
+}
+
+static int bcm_vk_tty_write(struct tty_struct *tty,
+ const unsigned char *buffer,
+ int count)
+{
+ int index;
+ struct bcm_vk *vk;
+ struct bcm_vk_tty *vktty;
+ int i;
+
+ index = tty->index;
+ vk = dev_get_drvdata(tty->dev);
+ vktty = &vk->tty[index];
+
+ /* Simply write each byte to the circular buffer */
+ for (i = 0; i < count; i++) {
+ vkwrite8(vk, buffer[i], BAR_1,
+ VK_BAR_CHAN_DATA(vktty, to, vktty->wr));
+ vktty->wr++;
+ if (vktty->wr >= vktty->to_size)
+ vktty->wr = 0;
+ }
+ /* Update write offset from shadow register to card */
+ vkwrite32(vk, vktty->wr, BAR_1, VK_BAR_CHAN_WR(vktty, to));
+ bcm_vk_tty_doorbell(vk, 0);
+
+ return count;
+}
+
+static int bcm_vk_tty_write_room(struct tty_struct *tty)
+{
+ struct bcm_vk *vk = dev_get_drvdata(tty->dev);
+
+ return vk->tty[tty->index].to_size - 1;
+}
+
+static const struct tty_operations serial_ops = {
+ .open = bcm_vk_tty_open,
+ .close = bcm_vk_tty_close,
+ .write = bcm_vk_tty_write,
+ .write_room = bcm_vk_tty_write_room,
+};
+
+int bcm_vk_tty_init(struct bcm_vk *vk, char *name)
+{
+ int i;
+ int err;
+ struct tty_driver *tty_drv;
+ struct device *dev = &vk->pdev->dev;
+
+ tty_drv = tty_alloc_driver(BCM_VK_NUM_TTY,
+ TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
+ if (IS_ERR(tty_drv))
+ return PTR_ERR(tty_drv);
+
+ /* Save struct tty_driver for uninstalling the device */
+ vk->tty_drv = tty_drv;
+
+ /* initialize the tty driver */
+ tty_drv->driver_name = KBUILD_MODNAME;
+ tty_drv->name = kstrdup(name, GFP_KERNEL);
+ if (!tty_drv->name) {
+ err = -ENOMEM;
+ goto err_put_tty_driver;
+ }
+ tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
+ tty_drv->subtype = SERIAL_TYPE_NORMAL;
+ tty_drv->init_termios = tty_std_termios;
+ tty_set_operations(tty_drv, &serial_ops);
+
+ /* register the tty driver */
+ err = tty_register_driver(tty_drv);
+ if (err) {
+ dev_err(dev, "tty_register_driver failed\n");
+ goto err_kfree_tty_name;
+ }
+
+ for (i = 0; i < BCM_VK_NUM_TTY; i++) {
+ struct device *tty_dev;
+
+ tty_port_init(&vk->tty[i].port);
+ tty_dev = tty_port_register_device(&vk->tty[i].port, tty_drv,
+ i, dev);
+ if (IS_ERR(tty_dev)) {
+ err = PTR_ERR(tty_dev);
+ goto unwind;
+ }
+ dev_set_drvdata(tty_dev, vk);
+ vk->tty[i].is_opened = false;
+ }
+
+ INIT_WORK(&vk->tty_wq_work, bcm_vk_tty_wq_handler);
+ vk->tty_wq_thread = create_singlethread_workqueue("tty");
+ if (!vk->tty_wq_thread) {
+ dev_err(dev, "Fail to create tty workqueue thread\n");
+ err = -ENOMEM;
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ while (--i >= 0)
+ tty_port_unregister_device(&vk->tty[i].port, tty_drv, i);
+ tty_unregister_driver(tty_drv);
+
+err_kfree_tty_name:
+ kfree(tty_drv->name);
+ tty_drv->name = NULL;
+
+err_put_tty_driver:
+ put_tty_driver(tty_drv);
+
+ return err;
+}
+
+void bcm_vk_tty_exit(struct bcm_vk *vk)
+{
+ int i;
+
+ del_timer_sync(&vk->serial_timer);
+ for (i = 0; i < BCM_VK_NUM_TTY; ++i) {
+ tty_port_unregister_device(&vk->tty[i].port,
+ vk->tty_drv,
+ i);
+ tty_port_destroy(&vk->tty[i].port);
+ }
+ tty_unregister_driver(vk->tty_drv);
+
+ kfree(vk->tty_drv->name);
+ vk->tty_drv->name = NULL;
+
+ put_tty_driver(vk->tty_drv);
+}
+
+void bcm_vk_tty_terminate_tty_user(struct bcm_vk *vk)
+{
+ struct bcm_vk_tty *vktty;
+ int i;
+
+ for (i = 0; i < BCM_VK_NUM_TTY; ++i) {
+ vktty = &vk->tty[i];
+ if (vktty->pid)
+ kill_pid(find_vpid(vktty->pid), SIGKILL, 1);
+ }
+}
+
+void bcm_vk_tty_wq_exit(struct bcm_vk *vk)
+{
+ cancel_work_sync(&vk->tty_wq_work);
+ destroy_workqueue(vk->tty_wq_thread);
+}
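The VK_BAR_CHAN() macros above address individual channel registers as base + offsetof() into the struct bcm_vk_tty_chan layout. A standalone sketch of the same trick, using the TO-channel base of index 0 from the defines above:

#include <stddef.h>
#include <stdio.h>

/* mirrors the first four fields of struct bcm_vk_tty_chan */
struct chan_regs {
	unsigned int reserved;
	unsigned int size;
	unsigned int wr;
	unsigned int rd;
};

int main(void)
{
	unsigned long to_base = 0x300000;	/* TO_TTYK_BASE(0) */

	printf("size@0x%lx wr@0x%lx rd@0x%lx\n",
	       to_base + (unsigned long)offsetof(struct chan_regs, size),
	       to_base + (unsigned long)offsetof(struct chan_regs, wr),
	       to_base + (unsigned long)offsetof(struct chan_regs, rd));
	return 0;
}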
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 8859011672cb..8200af22b529 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -398,6 +398,11 @@ static int rts522a_extra_init_hw(struct rtsx_pcr *pcr)
{
rts5227_extra_init_hw(pcr);
+ /* Power down OCP to reduce power consumption */
+ if (!pcr->card_exist)
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+
rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, FUNC_FORCE_UPME_XMT_DBG,
FUNC_FORCE_UPME_XMT_DBG);
rtsx_pci_write_register(pcr, PCLK_CTL, 0x04, 0x04);
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 5a491d2cd1ae..273311184669 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -59,12 +59,6 @@ static const struct pci_device_id rtsx_pci_ids[] = {
MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
-static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
-{
- pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_ASPMC, 0);
-}
-
static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
rtsx_pci_write_register(pcr, MSGTXDATA0,
@@ -1805,7 +1799,6 @@ static int rtsx_pci_runtime_resume(struct device *device)
struct pci_dev *pcidev = to_pci_dev(device);
struct pcr_handle *handle;
struct rtsx_pcr *pcr;
- int ret = 0;
handle = pci_get_drvdata(pcidev);
pcr = handle->pcr;
@@ -1830,7 +1823,7 @@ static int rtsx_pci_runtime_resume(struct device *device)
schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
mutex_unlock(&pcr->pcr_mutex);
- return ret;
+ return 0;
}
#else /* CONFIG_PM */
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index d97a243ad30c..c173a5e88c91 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -178,7 +178,7 @@ static ssize_t perst_reloads_same_image_store(struct device *device,
if ((rc != 1) || !(val == 1 || val == 0))
return -EINVAL;
- adapter->perst_same_image = (val == 1 ? true : false);
+ adapter->perst_same_image = (val == 1);
return count;
}
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 7c45f82b4302..80114f4c80ad 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -35,6 +35,10 @@ static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
};
+static const struct eeprom_93xx46_devtype_data microchip_93lc46b_data = {
+ .quirks = EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE,
+};
+
struct eeprom_93xx46_dev {
struct spi_device *spi;
struct eeprom_93xx46_platform_data *pdata;
@@ -55,6 +59,11 @@ static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
}
+static inline bool has_quirk_extra_read_cycle(struct eeprom_93xx46_dev *edev)
+{
+ return edev->pdata->quirks & EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE;
+}
+
static int eeprom_93xx46_read(void *priv, unsigned int off,
void *val, size_t count)
{
@@ -96,6 +105,11 @@ static int eeprom_93xx46_read(void *priv, unsigned int off,
dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
cmd_addr, edev->spi->max_speed_hz);
+ if (has_quirk_extra_read_cycle(edev)) {
+ cmd_addr <<= 1;
+ bits += 1;
+ }
+
spi_message_init(&m);
t[0].tx_buf = (char *)&cmd_addr;
@@ -363,6 +377,7 @@ static void select_deassert(void *context)
static const struct of_device_id eeprom_93xx46_of_table[] = {
{ .compatible = "eeprom-93xx46", },
{ .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
+ { .compatible = "microchip,93lc46b", .data = &microchip_93lc46b_data, },
{}
};
MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
@@ -512,3 +527,5 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs");
MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
MODULE_ALIAS("spi:93xx46");
+MODULE_ALIAS("spi:eeprom-93xx46");
+MODULE_ALIAS("spi:93lc46b");
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 70eb5ed942d0..f12e909034ac 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -520,12 +520,13 @@ fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
{
struct fastrpc_dma_buf_attachment *a = attachment->priv;
struct sg_table *table;
+ int ret;
table = &a->sgt;
- if (!dma_map_sgtable(attachment->dev, table, dir, 0))
- return ERR_PTR(-ENOMEM);
-
+ ret = dma_map_sgtable(attachment->dev, table, dir, 0);
+ if (ret)
+ table = ERR_PTR(ret);
return table;
}
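The fastrpc change above stops collapsing every dma_map_sgtable() failure into -ENOMEM and instead folds the real errno into the returned pointer. A minimal userspace re-implementation of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention it relies on, for illustration only:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *map_table(int fail_errno)
{
	static char table[16];

	if (fail_errno)
		return ERR_PTR(-(long)fail_errno);	/* propagate errno */
	return table;
}

int main(void)
{
	void *t = map_table(ENOMEM);

	if (IS_ERR(t))
		printf("mapping failed: %ld\n", PTR_ERR(t));
	return 0;
}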
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
index eccd8c7dc62d..5d8b48288cf4 100644
--- a/drivers/misc/habanalabs/common/Makefile
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -1,7 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+include $(src)/common/mmu/Makefile
+habanalabs-y += $(HL_COMMON_MMU_FILES)
+
+include $(src)/common/pci/Makefile
+habanalabs-y += $(HL_COMMON_PCI_FILES)
+
HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/asid.o common/habanalabs_ioctl.o \
common/command_buffer.o common/hw_queue.o common/irq.o \
common/sysfs.o common/hwmon.o common/memory.o \
- common/command_submission.o common/mmu.o common/mmu_v1.o \
- common/firmware_if.o common/pci.o
+ common/command_submission.o common/firmware_if.o
diff --git a/drivers/misc/habanalabs/common/asid.c b/drivers/misc/habanalabs/common/asid.c
index a2fdf31cf27c..ede04c032b6e 100644
--- a/drivers/misc/habanalabs/common/asid.c
+++ b/drivers/misc/habanalabs/common/asid.c
@@ -50,8 +50,10 @@ unsigned long hl_asid_alloc(struct hl_device *hdev)
void hl_asid_free(struct hl_device *hdev, unsigned long asid)
{
- if (WARN((asid == 0 || asid >= hdev->asic_prop.max_asid),
- "Invalid ASID %lu", asid))
+ if (asid == HL_KERNEL_ASID_ID || asid >= hdev->asic_prop.max_asid) {
+ dev_crit(hdev->dev, "Invalid ASID %lu", asid);
return;
+ }
+
clear_bit(asid, hdev->asid_bitmap);
}
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index 6f6a904ab6ca..d9adb9a5e4d8 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -635,10 +635,12 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
cb_handle >>= PAGE_SHIFT;
cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
- /* hl_cb_get should never fail here so use kernel WARN */
- WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle);
- if (!cb)
+ /* hl_cb_get should never fail here */
+ if (!cb) {
+ dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
+ (u32) cb_handle);
goto destroy_cb;
+ }
return cb;
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index b2b3d2b0f808..7bd4a03b3429 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -48,8 +48,8 @@ void hl_sob_reset_error(struct kref *ref)
struct hl_device *hdev = hw_sob->hdev;
dev_crit(hdev->dev,
- "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
- hw_sob->q_idx, hw_sob->sob_id);
+ "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
+ hw_sob->q_idx, hw_sob->sob_id);
}
/**
@@ -149,9 +149,10 @@ void hl_fence_get(struct hl_fence *fence)
kref_get(&fence->refcount);
}
-static void hl_fence_init(struct hl_fence *fence)
+static void hl_fence_init(struct hl_fence *fence, u64 sequence)
{
kref_init(&fence->refcount);
+ fence->cs_sequence = sequence;
fence->error = 0;
fence->timestamp = ktime_set(0, 0);
init_completion(&fence->completion);
@@ -184,6 +185,28 @@ static void cs_job_put(struct hl_cs_job *job)
kref_put(&job->refcount, cs_job_do_release);
}
+bool cs_needs_completion(struct hl_cs *cs)
+{
+ /* In case this is a staged CS, only the last CS in the sequence should
+ * get a completion; any non-staged CS will always get a completion
+ */
+ if (cs->staged_cs && !cs->staged_last)
+ return false;
+
+ return true;
+}
+
+bool cs_needs_timeout(struct hl_cs *cs)
+{
+ /* In case this is a staged CS, only the first CS in the sequence should
+ * get a timeout; any non-staged CS will always get a timeout
+ */
+ if (cs->staged_cs && !cs->staged_first)
+ return false;
+
+ return true;
+}
+
static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
{
/*
@@ -225,6 +248,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
parser.queue_type = job->queue_type;
parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
job->patched_cb = NULL;
+ parser.completion = cs_needs_completion(job->cs);
rc = hdev->asic_funcs->cs_parser(hdev, &parser);
@@ -290,13 +314,153 @@ static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
hl_debugfs_remove_job(hdev, job);
- if (job->queue_type == QUEUE_TYPE_EXT ||
- job->queue_type == QUEUE_TYPE_HW)
+ /* We decrement reference only for a CS that gets completion
+ * because the reference was incremented only for this kind of CS
+ * right before it was scheduled.
+ *
+ * In staged submission, only the last CS marked as 'staged_last'
+ * gets completion, hence its release function will be called from here.
+ * As for all the other CS's in the staged submission which do not get
+ * completion, their CS reference will be decremented by the
+ * 'staged_last' CS during the CS release flow.
+ * All relevant PQ CI counters will be incremented during the CS release
+ * flow by calling 'hl_hw_queue_update_ci'.
+ */
+ if (cs_needs_completion(cs) &&
+ (job->queue_type == QUEUE_TYPE_EXT ||
+ job->queue_type == QUEUE_TYPE_HW))
cs_put(cs);
cs_job_put(job);
}
+/*
+ * hl_staged_cs_find_first - locate the first CS in this staged submission
+ *
+ * @hdev: pointer to device structure
+ * @cs_seq: staged submission sequence number
+ *
+ * @note: This function must be called under 'hdev->cs_mirror_lock'
+ *
+ * Find and return a CS pointer with the given sequence
+ */
+struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
+{
+ struct hl_cs *cs;
+
+ list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
+ if (cs->staged_cs && cs->staged_first &&
+ cs->sequence == cs_seq)
+ return cs;
+
+ return NULL;
+}
+
+/*
+ * is_staged_cs_last_exists - returns true if the last CS in sequence exists
+ *
+ * @hdev: pointer to device structure
+ * @cs: staged submission member
+ *
+ */
+bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
+{
+ struct hl_cs *last_entry;
+
+ last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
+ staged_cs_node);
+
+ if (last_entry->staged_last)
+ return true;
+
+ return false;
+}
+
+/*
+ * staged_cs_get - get CS reference if this CS is a part of a staged CS
+ *
+ * @hdev: pointer to device structure
+ * @cs: current CS
+ * @cs_seq: staged submission sequence number
+ *
+ * Increment the CS reference for every CS in this staged submission except
+ * for the CS which gets the completion.
+ */
+static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
+{
+ /* Only the last CS in this staged submission will get a completion.
+ * We must increment the reference for all other CS's in this
+ * staged submission.
+ * Once we get a completion we will release the whole staged submission.
+ */
+ if (!cs->staged_last)
+ cs_get(cs);
+}
+
+/*
+ * staged_cs_put - put a CS in case it is part of staged submission
+ *
+ * @hdev: pointer to device structure
+ * @cs: CS to put
+ *
+ * This function decrements a CS reference (for a non completion CS)
+ */
+static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
+{
+ /* We release all CS's in a staged submission except the last
+ * CS, whose reference we never incremented.
+ */
+ if (!cs_needs_completion(cs))
+ cs_put(cs);
+}
+
+static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
+{
+ bool next_entry_found = false;
+ struct hl_cs *next;
+
+ if (!cs_needs_timeout(cs))
+ return;
+
+ spin_lock(&hdev->cs_mirror_lock);
+
+ /* We need to handle the tdr only once for the complete staged submission.
+ * Hence, we choose the CS that reaches this function first, which is
+ * the CS marked as 'staged_last'.
+ */
+ if (cs->staged_cs && cs->staged_last)
+ cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
+
+ spin_unlock(&hdev->cs_mirror_lock);
+
+ /* Don't cancel TDR in case this CS was timedout because we might be
+ * running from the TDR context
+ */
+ if (cs && (cs->timedout ||
+ hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT))
+ return;
+
+ if (cs && cs->tdr_active)
+ cancel_delayed_work_sync(&cs->work_tdr);
+
+ spin_lock(&hdev->cs_mirror_lock);
+
+ /* queue TDR for next CS */
+ list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node)
+ if (cs_needs_timeout(next)) {
+ next_entry_found = true;
+ break;
+ }
+
+ if (next_entry_found && !next->tdr_active) {
+ next->tdr_active = true;
+ schedule_delayed_work(&next->work_tdr,
+ hdev->timeout_jiffies);
+ }
+
+ spin_unlock(&hdev->cs_mirror_lock);
+}
+
static void cs_do_release(struct kref *ref)
{
struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
@@ -346,36 +510,37 @@ static void cs_do_release(struct kref *ref)
hdev->asic_funcs->hw_queues_unlock(hdev);
- /* Need to update CI for internal queues */
- hl_int_hw_queue_update_ci(cs);
+ /* Need to update CI for all queue jobs that does not get completion */
+ hl_hw_queue_update_ci(cs);
/* remove CS from CS mirror list */
spin_lock(&hdev->cs_mirror_lock);
list_del_init(&cs->mirror_node);
spin_unlock(&hdev->cs_mirror_lock);
- /* Don't cancel TDR in case this CS was timedout because we might be
- * running from the TDR context
- */
- if (!cs->timedout && hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
- struct hl_cs *next;
-
- if (cs->tdr_active)
- cancel_delayed_work_sync(&cs->work_tdr);
+ cs_handle_tdr(hdev, cs);
- spin_lock(&hdev->cs_mirror_lock);
-
- /* queue TDR for next CS */
- next = list_first_entry_or_null(&hdev->cs_mirror_list,
- struct hl_cs, mirror_node);
+ if (cs->staged_cs) {
+ /* the completion CS decrements reference for the entire
+ * staged submission
+ */
+ if (cs->staged_last) {
+ struct hl_cs *staged_cs, *tmp;
- if (next && !next->tdr_active) {
- next->tdr_active = true;
- schedule_delayed_work(&next->work_tdr,
- hdev->timeout_jiffies);
+ list_for_each_entry_safe(staged_cs, tmp,
+ &cs->staged_cs_node, staged_cs_node)
+ staged_cs_put(hdev, staged_cs);
}
- spin_unlock(&hdev->cs_mirror_lock);
+ /* A staged CS will be a member in the list only after it
+ * was submitted. We used 'cs_mirror_lock' when inserting
+ * it to list so we will use it again when removing it
+ */
+ if (cs->submitted) {
+ spin_lock(&hdev->cs_mirror_lock);
+ list_del(&cs->staged_cs_node);
+ spin_unlock(&hdev->cs_mirror_lock);
+ }
}
out:
@@ -461,7 +626,8 @@ static void cs_timedout(struct work_struct *work)
}
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
- enum hl_cs_type cs_type, struct hl_cs **cs_new)
+ enum hl_cs_type cs_type, u64 user_sequence,
+ struct hl_cs **cs_new)
{
struct hl_cs_counters_atomic *cntr;
struct hl_fence *other = NULL;
@@ -478,6 +644,9 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
return -ENOMEM;
}
+ /* increment refcnt for context */
+ hl_ctx_get(hdev, ctx);
+
cs->ctx = ctx;
cs->submitted = false;
cs->completed = false;
@@ -507,6 +676,18 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
(hdev->asic_prop.max_pending_cs - 1)];
if (other && !completion_done(&other->completion)) {
+ /* If the following statement is true, it means we have reached
+ * a point in which only part of the staged submission was
+ * submitted and we don't have enough room in the 'cs_pending'
+ * array for the rest of the submission.
+ * This causes a deadlock because this CS will never be
+ * completed as it depends on future CS's for completion.
+ */
+ if (other->cs_sequence == user_sequence)
+ dev_crit_ratelimited(hdev->dev,
+ "Staged CS %llu deadlock due to lack of resources",
+ user_sequence);
+
dev_dbg_ratelimited(hdev->dev,
"Rejecting CS because of too many in-flights CS\n");
atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
@@ -525,7 +706,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
}
/* init hl_fence */
- hl_fence_init(&cs_cmpl->base_fence);
+ hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);
cs->sequence = cs_cmpl->cs_seq;
@@ -549,6 +730,7 @@ free_fence:
kfree(cs_cmpl);
free_cs:
kfree(cs);
+ hl_ctx_put(ctx);
return rc;
}
@@ -556,6 +738,8 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
{
struct hl_cs_job *job, *tmp;
+ staged_cs_put(hdev, cs);
+
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
complete_job(hdev, job);
}
@@ -565,7 +749,9 @@ void hl_cs_rollback_all(struct hl_device *hdev)
int i;
struct hl_cs *cs, *tmp;
- /* flush all completions */
+ /* flush all completions before iterating over the CS mirror list in
+ * order to avoid a race with the release functions
+ */
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
flush_workqueue(hdev->cq_wq[i]);
@@ -574,12 +760,24 @@ void hl_cs_rollback_all(struct hl_device *hdev)
cs_get(cs);
cs->aborted = true;
dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
- cs->ctx->asid, cs->sequence);
+ cs->ctx->asid, cs->sequence);
cs_rollback(hdev, cs);
cs_put(cs);
}
}
+void hl_pending_cb_list_flush(struct hl_ctx *ctx)
+{
+ struct hl_pending_cb *pending_cb, *tmp;
+
+ list_for_each_entry_safe(pending_cb, tmp,
+ &ctx->pending_cb_list, cb_node) {
+ list_del(&pending_cb->cb_node);
+ hl_cb_put(pending_cb->cb);
+ kfree(pending_cb);
+ }
+}
+
static void job_wq_completion(struct work_struct *work)
{
struct hl_cs_job *job = container_of(work, struct hl_cs_job,
@@ -734,6 +932,12 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
return -EBUSY;
}
+ if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
+ !hdev->supports_staged_submission) {
+ dev_err(hdev->dev, "staged submission not supported");
+ return -EPERM;
+ }
+
cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
@@ -805,10 +1009,38 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
return 0;
}
+static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
+ u64 sequence, u32 flags)
+{
+ if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
+ return 0;
+
+ cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
+ cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);
+
+ if (cs->staged_first) {
+ /* Staged CS sequence is the first CS sequence */
+ INIT_LIST_HEAD(&cs->staged_cs_node);
+ cs->staged_sequence = cs->sequence;
+ } else {
+ /* User sequence will be validated in 'hl_hw_queue_schedule_cs'
+ * under the cs_mirror_lock
+ */
+ cs->staged_sequence = sequence;
+ }
+
+ /* Increment CS reference if needed */
+ staged_cs_get(hdev, cs);
+
+ cs->staged_cs = true;
+
+ return 0;
+}
+
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
- u32 num_chunks, u64 *cs_seq, bool timestamp)
+ u32 num_chunks, u64 *cs_seq, u32 flags)
{
- bool int_queues_only = true;
+ bool staged_mid, int_queues_only = true;
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_chunk *cs_chunk_array;
struct hl_cs_counters_atomic *cntr;
@@ -816,9 +1048,11 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
+ u64 user_sequence;
int rc, i;
cntr = &hdev->aggregated_cs_counters;
+ user_sequence = *cs_seq;
*cs_seq = ULLONG_MAX;
rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
@@ -826,20 +1060,26 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
if (rc)
goto out;
- /* increment refcnt for context */
- hl_ctx_get(hdev, hpriv->ctx);
+ if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
+ !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
+ staged_mid = true;
+ else
+ staged_mid = false;
- rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, &cs);
- if (rc) {
- hl_ctx_put(hpriv->ctx);
+ rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
+ staged_mid ? user_sequence : ULLONG_MAX, &cs);
+ if (rc)
goto free_cs_chunk_array;
- }
- cs->timestamp = !!timestamp;
+ cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
*cs_seq = cs->sequence;
hl_debugfs_add_cs(cs);
+ rc = cs_staged_submission(hdev, cs, user_sequence, flags);
+ if (rc)
+ goto free_cs_object;
+
/* Validate ALL the CS chunks before submitting the CS */
for (i = 0 ; i < num_chunks ; i++) {
struct hl_cs_chunk *chunk = &cs_chunk_array[i];
@@ -899,8 +1139,9 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
* Only increment for JOB on external or H/W queues, because
* only for those JOBs we get completion
*/
- if (job->queue_type == QUEUE_TYPE_EXT ||
- job->queue_type == QUEUE_TYPE_HW)
+ if (cs_needs_completion(cs) &&
+ (job->queue_type == QUEUE_TYPE_EXT ||
+ job->queue_type == QUEUE_TYPE_HW))
cs_get(cs);
hl_debugfs_add_job(hdev, job);
@@ -916,11 +1157,14 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
}
}
- if (int_queues_only) {
+ /* We allow a CS with any queue type combination as long as it does
+ * not get a completion
+ */
+ if (int_queues_only && cs_needs_completion(cs)) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
- "Reject CS %d.%llu because only internal queues jobs are present\n",
+ "Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
cs->ctx->asid, cs->sequence);
rc = -EINVAL;
goto free_cs_object;
@@ -954,6 +1198,129 @@ out:
return rc;
}
+static int pending_cb_create_job(struct hl_device *hdev, struct hl_ctx *ctx,
+ struct hl_cs *cs, struct hl_cb *cb, u32 size, u32 hw_queue_id)
+{
+ struct hw_queue_properties *hw_queue_prop;
+ struct hl_cs_counters_atomic *cntr;
+ struct hl_cs_job *job;
+
+ hw_queue_prop = &hdev->asic_prop.hw_queues_props[hw_queue_id];
+ cntr = &hdev->aggregated_cs_counters;
+
+ job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true);
+ if (!job) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
+ dev_err(hdev->dev, "Failed to allocate a new job\n");
+ return -ENOMEM;
+ }
+
+ job->id = 0;
+ job->cs = cs;
+ job->user_cb = cb;
+ atomic_inc(&job->user_cb->cs_cnt);
+ job->user_cb_size = size;
+ job->hw_queue_id = hw_queue_id;
+ job->patched_cb = job->user_cb;
+ job->job_cb_size = job->user_cb_size;
+
+ /* increment refcount as for external queues we get completion */
+ cs_get(cs);
+
+ cs->jobs_in_queue_cnt[job->hw_queue_id]++;
+
+ list_add_tail(&job->cs_node, &cs->job_list);
+
+ hl_debugfs_add_job(hdev, job);
+
+ return 0;
+}
+
+static int hl_submit_pending_cb(struct hl_fpriv *hpriv)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_ctx *ctx = hpriv->ctx;
+ struct hl_pending_cb *pending_cb, *tmp;
+ struct list_head local_cb_list;
+ struct hl_cs *cs;
+ struct hl_cb *cb;
+ u32 hw_queue_id;
+ u32 cb_size;
+ int process_list, rc = 0;
+
+ if (list_empty(&ctx->pending_cb_list))
+ return 0;
+
+ process_list = atomic_cmpxchg(&ctx->thread_pending_cb_token, 1, 0);
+
+ /* Only a single thread is allowed to process the list */
+ if (!process_list)
+ return 0;
+
+ if (list_empty(&ctx->pending_cb_list))
+ goto free_pending_cb_token;
+
+ /* move all list elements to a local list */
+ INIT_LIST_HEAD(&local_cb_list);
+ spin_lock(&ctx->pending_cb_lock);
+ list_for_each_entry_safe(pending_cb, tmp, &ctx->pending_cb_list,
+ cb_node)
+ list_move_tail(&pending_cb->cb_node, &local_cb_list);
+ spin_unlock(&ctx->pending_cb_lock);
+
+ rc = allocate_cs(hdev, ctx, CS_TYPE_DEFAULT, ULLONG_MAX, &cs);
+ if (rc)
+ goto add_list_elements;
+
+ hl_debugfs_add_cs(cs);
+
+ /* Iterate through pending cb list, create jobs and add to CS */
+ list_for_each_entry(pending_cb, &local_cb_list, cb_node) {
+ cb = pending_cb->cb;
+ cb_size = pending_cb->cb_size;
+ hw_queue_id = pending_cb->hw_queue_id;
+
+ rc = pending_cb_create_job(hdev, ctx, cs, cb, cb_size,
+ hw_queue_id);
+ if (rc)
+ goto free_cs_object;
+ }
+
+ rc = hl_hw_queue_schedule_cs(cs);
+ if (rc) {
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to submit CS %d.%llu (%d)\n",
+ ctx->asid, cs->sequence, rc);
+ goto free_cs_object;
+ }
+
+ /* pending cb was scheduled successfully */
+ list_for_each_entry_safe(pending_cb, tmp, &local_cb_list, cb_node) {
+ list_del(&pending_cb->cb_node);
+ kfree(pending_cb);
+ }
+
+ cs_put(cs);
+
+ goto free_pending_cb_token;
+
+free_cs_object:
+ cs_rollback(hdev, cs);
+ cs_put(cs);
+add_list_elements:
+ spin_lock(&ctx->pending_cb_lock);
+ list_for_each_entry_safe_reverse(pending_cb, tmp, &local_cb_list,
+ cb_node)
+ list_move(&pending_cb->cb_node, &ctx->pending_cb_list);
+ spin_unlock(&ctx->pending_cb_lock);
+free_pending_cb_token:
+ atomic_set(&ctx->thread_pending_cb_token, 1);
+
+ return rc;
+}
+
static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
u64 *cs_seq)
{
@@ -1003,7 +1370,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
rc = 0;
} else {
rc = cs_ioctl_default(hpriv, chunks, num_chunks,
- cs_seq, false);
+ cs_seq, 0);
}
mutex_unlock(&hpriv->restore_phase_mutex);
@@ -1275,15 +1642,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
}
}
- /* increment refcnt for context */
- hl_ctx_get(hdev, ctx);
-
- rc = allocate_cs(hdev, ctx, cs_type, &cs);
+ rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs);
if (rc) {
if (cs_type == CS_TYPE_WAIT ||
cs_type == CS_TYPE_COLLECTIVE_WAIT)
hl_fence_put(sig_fence);
- hl_ctx_put(ctx);
goto free_cs_chunk_array;
}
@@ -1346,7 +1709,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
enum hl_cs_type cs_type;
u64 cs_seq = ULONG_MAX;
void __user *chunks;
- u32 num_chunks;
+ u32 num_chunks, flags;
int rc;
rc = hl_cs_sanity_checks(hpriv, args);
@@ -1357,10 +1720,20 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
if (rc)
goto out;
+ rc = hl_submit_pending_cb(hpriv);
+ if (rc)
+ goto out;
+
cs_type = hl_cs_get_cs_type(args->in.cs_flags &
~HL_CS_FLAGS_FORCE_RESTORE);
chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
num_chunks = args->in.num_chunks_execute;
+ flags = args->in.cs_flags;
+
+ /* In case this is a staged CS, user should supply the CS sequence */
+ if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
+ !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
+ cs_seq = args->in.seq;
switch (cs_type) {
case CS_TYPE_SIGNAL:
@@ -1371,7 +1744,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
break;
default:
rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
- args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP);
+ args->in.cs_flags);
break;
}
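A staged submission, as wired up above, is a stream of CS's in which only the first arms the TDR and only the last gets a completion, and every CS after the first must pass the first CS's sequence in args->in.seq. A sketch of how a submitter would tag each CS; the flag names come from the habanalabs uapi, but the numeric values and the helper are illustrative, not the real encodings:

#include <stdint.h>
#include <stdio.h>

/* illustrative values only; the real encodings live in the uapi header */
#define HL_CS_FLAGS_STAGED_SUBMISSION		0x1
#define HL_CS_FLAGS_STAGED_SUBMISSION_FIRST	0x2
#define HL_CS_FLAGS_STAGED_SUBMISSION_LAST	0x4

static uint32_t staged_flags(int idx, int total)
{
	uint32_t flags = HL_CS_FLAGS_STAGED_SUBMISSION;

	if (idx == 0)
		flags |= HL_CS_FLAGS_STAGED_SUBMISSION_FIRST;
	if (idx == total - 1)
		flags |= HL_CS_FLAGS_STAGED_SUBMISSION_LAST;
	return flags;
}

int main(void)
{
	int i, total = 3;

	for (i = 0; i < total; i++)
		printf("cs %d: flags=0x%x%s\n", i, staged_flags(i, total),
		       i ? " (must also pass the first CS's sequence)" : "");
	return 0;
}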
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index f65e6559149b..cda871afb8f4 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -12,9 +12,14 @@
static void hl_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
- u64 idle_mask = 0;
+ u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
int i;
+ /* Release all allocated pending cb's; those cb's were never
+ * scheduled, so it is safe to release them here
+ */
+ hl_pending_cb_list_flush(ctx);
+
/*
* If we arrived here, there are no jobs waiting for this context
* on its queues so we can safely remove it.
@@ -50,12 +55,15 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
if ((!hdev->pldm) && (hdev->pdev) &&
(!hdev->asic_funcs->is_device_idle(hdev,
- &idle_mask, NULL)))
+ idle_mask,
+ HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)))
dev_notice(hdev->dev,
- "device not idle after user context is closed (0x%llx)\n",
- idle_mask);
+ "device not idle after user context is closed (0x%llx, 0x%llx)\n",
+ idle_mask[0], idle_mask[1]);
} else {
dev_dbg(hdev->dev, "closing kernel context\n");
+ hdev->asic_funcs->ctx_fini(ctx);
+ hl_vm_ctx_fini(ctx);
hl_mmu_ctx_fini(ctx);
}
}
@@ -140,8 +148,11 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
kref_init(&ctx->refcount);
ctx->cs_sequence = 1;
+ INIT_LIST_HEAD(&ctx->pending_cb_list);
+ spin_lock_init(&ctx->pending_cb_lock);
spin_lock_init(&ctx->cs_lock);
atomic_set(&ctx->thread_ctx_switch_token, 1);
+ atomic_set(&ctx->thread_pending_cb_token, 1);
ctx->thread_ctx_switch_wait_token = 0;
ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
sizeof(struct hl_fence *),
@@ -151,11 +162,18 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
if (is_kernel_ctx) {
ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
- rc = hl_mmu_ctx_init(ctx);
+ rc = hl_vm_ctx_init(ctx);
if (rc) {
- dev_err(hdev->dev, "Failed to init mmu ctx module\n");
+ dev_err(hdev->dev, "Failed to init mem ctx module\n");
+ rc = -ENOMEM;
goto err_free_cs_pending;
}
+
+ rc = hdev->asic_funcs->ctx_init(ctx);
+ if (rc) {
+ dev_err(hdev->dev, "ctx_init failed\n");
+ goto err_vm_ctx_fini;
+ }
} else {
ctx->asid = hl_asid_alloc(hdev);
if (!ctx->asid) {
@@ -194,7 +212,8 @@ err_cb_va_pool_fini:
err_vm_ctx_fini:
hl_vm_ctx_fini(ctx);
err_asid_free:
- hl_asid_free(hdev, ctx->asid);
+ if (ctx->asid != HL_KERNEL_ASID_ID)
+ hl_asid_free(hdev, ctx->asid);
err_free_cs_pending:
kfree(ctx->cs_pending);
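ctx->thread_pending_cb_token, initialised to 1 above, acts as a claim token: the caller that atomically swaps it from 1 to 0 becomes the sole processor of the pending-CB list (see hl_submit_pending_cb earlier in this series) and restores it when done. A sketch of the pattern with C11 atomics standing in for the kernel's atomic_cmpxchg() and atomic_set():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int token = 1;

static void process_pending(void)
{
	int expected = 1;

	/* only one caller wins the token and processes the list */
	if (!atomic_compare_exchange_strong(&token, &expected, 0))
		return;

	printf("processing pending list\n");

	atomic_store(&token, 1);	/* release for the next caller */
}

int main(void)
{
	process_pending();
	process_pending();	/* also runs; the token was returned */
	return 0;
}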
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index cef716643979..df847a6d19f4 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -310,8 +310,8 @@ static int mmu_show(struct seq_file *s, void *data)
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
struct hl_ctx *ctx;
- struct hl_mmu_hop_info hops_info;
- u64 virt_addr = dev_entry->mmu_addr;
+ struct hl_mmu_hop_info hops_info = {0};
+ u64 virt_addr = dev_entry->mmu_addr, phys_addr;
int i;
if (!hdev->mmu_enable)
@@ -333,8 +333,19 @@ static int mmu_show(struct seq_file *s, void *data)
return 0;
}
- seq_printf(s, "asid: %u, virt_addr: 0x%llx\n",
- dev_entry->mmu_asid, dev_entry->mmu_addr);
+ phys_addr = hops_info.hop_info[hops_info.used_hops - 1].hop_pte_val;
+
+ if (hops_info.scrambled_vaddr &&
+ (dev_entry->mmu_addr != hops_info.scrambled_vaddr))
+ seq_printf(s,
+ "asid: %u, virt_addr: 0x%llx, scrambled virt_addr: 0x%llx,\nphys_addr: 0x%llx, scrambled_phys_addr: 0x%llx\n",
+ dev_entry->mmu_asid, dev_entry->mmu_addr,
+ hops_info.scrambled_vaddr,
+ hops_info.unscrambled_paddr, phys_addr);
+ else
+ seq_printf(s,
+ "asid: %u, virt_addr: 0x%llx, phys_addr: 0x%llx\n",
+ dev_entry->mmu_asid, dev_entry->mmu_addr, phys_addr);
for (i = 0 ; i < hops_info.used_hops ; i++) {
seq_printf(s, "hop%d_addr: 0x%llx\n",
@@ -403,7 +414,7 @@ static int engines_show(struct seq_file *s, void *data)
return 0;
}
- hdev->asic_funcs->is_device_idle(hdev, NULL, s);
+ hdev->asic_funcs->is_device_idle(hdev, NULL, 0, s);
return 0;
}
@@ -865,6 +876,17 @@ static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
return count;
}
+static ssize_t hl_security_violations_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+
+ hdev->asic_funcs->ack_protection_bits_errors(hdev);
+
+ return 0;
+}
+
static const struct file_operations hl_data32b_fops = {
.owner = THIS_MODULE,
.read = hl_data_read32,
@@ -922,6 +944,11 @@ static const struct file_operations hl_stop_on_err_fops = {
.write = hl_stop_on_err_write
};
+static const struct file_operations hl_security_violations_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_security_violations_read
+};
+
static const struct hl_info_list hl_debugfs_list[] = {
{"command_buffers", command_buffers_show, NULL},
{"command_submission", command_submission_show, NULL},
@@ -1071,6 +1098,12 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry,
&hl_stop_on_err_fops);
+ debugfs_create_file("dump_security_violations",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_security_violations_fops);
+
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
ent = debugfs_create_file(hl_debugfs_list[i].name,
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 69d04eca767f..15fcb5c31c4b 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -142,6 +142,9 @@ static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
case HL_MMAP_TYPE_CB:
return hl_cb_mmap(hpriv, vma);
+
+ case HL_MMAP_TYPE_BLOCK:
+ return hl_hw_block_mmap(hpriv, vma);
}
return -EINVAL;
@@ -373,7 +376,6 @@ static int device_early_init(struct hl_device *hdev)
mutex_init(&hdev->send_cpu_message_lock);
mutex_init(&hdev->debug_lock);
- mutex_init(&hdev->mmu_cache_lock);
INIT_LIST_HEAD(&hdev->cs_mirror_list);
spin_lock_init(&hdev->cs_mirror_lock);
INIT_LIST_HEAD(&hdev->fpriv_list);
@@ -414,7 +416,6 @@ static void device_early_fini(struct hl_device *hdev)
{
int i;
- mutex_destroy(&hdev->mmu_cache_lock);
mutex_destroy(&hdev->debug_lock);
mutex_destroy(&hdev->send_cpu_message_lock);
@@ -1158,12 +1159,20 @@ kill_processes:
atomic_set(&hdev->in_reset, 0);
hdev->needs_reset = false;
- if (hard_reset)
+ dev_notice(hdev->dev, "Successfully finished resetting the device\n");
+
+ if (hard_reset) {
hdev->hard_reset_cnt++;
- else
- hdev->soft_reset_cnt++;
- dev_warn(hdev->dev, "Successfully finished resetting the device\n");
+ /* After reset is done, we are ready to receive events from
+ * the F/W. We can't do it before because we will ignore events
+ * and if those events are fatal, we won't know about it and
+ * the device will be operational although it shouldn't be
+ */
+ hdev->asic_funcs->enable_events_from_fw(hdev);
+ } else {
+ hdev->soft_reset_cnt++;
+ }
return 0;
@@ -1314,11 +1323,16 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
hdev->compute_ctx = NULL;
+ hl_debugfs_add_device(hdev);
+
+ /* debugfs nodes are created in hl_ctx_init so it must be called after
+ * hl_debugfs_add_device.
+ */
rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
if (rc) {
dev_err(hdev->dev, "failed to initialize kernel context\n");
kfree(hdev->kernel_ctx);
- goto mmu_fini;
+ goto remove_device_from_debugfs;
}
rc = hl_cb_pool_init(hdev);
@@ -1327,8 +1341,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto release_ctx;
}
- hl_debugfs_add_device(hdev);
-
/*
* From this point, in case of an error, add char devices and create
* sysfs nodes as part of the error flow, to allow debugging.
@@ -1411,12 +1423,21 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
hdev->init_done = true;
+ /* After initialization is done, we are ready to receive events from
+ * the F/W. We can't do it before because we will ignore events and if
+ * those events are fatal, we won't know about it and the device will
+ * be operational although it shouldn't be
+ */
+ hdev->asic_funcs->enable_events_from_fw(hdev);
+
return 0;
release_ctx:
if (hl_ctx_put(hdev->kernel_ctx) != 1)
dev_err(hdev->dev,
"kernel ctx is still alive on initialization failure\n");
+remove_device_from_debugfs:
+ hl_debugfs_remove_device(hdev);
mmu_fini:
hl_mmu_fini(hdev);
eq_fini:
@@ -1482,7 +1503,8 @@ void hl_device_fini(struct hl_device *hdev)
usleep_range(50, 200);
rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
if (ktime_compare(ktime_get(), timeout) > 0) {
- WARN(1, "Failed to remove device because reset function did not finish\n");
+ dev_crit(hdev->dev,
+ "Failed to remove device because reset function did not finish\n");
return;
}
}
@@ -1515,8 +1537,6 @@ void hl_device_fini(struct hl_device *hdev)
device_late_fini(hdev);
- hl_debugfs_remove_device(hdev);
-
/*
* Halt the engines and disable interrupts so we won't get any more
* completions from H/W and we won't have any accesses from the
@@ -1548,6 +1568,8 @@ void hl_device_fini(struct hl_device *hdev)
if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
dev_err(hdev->dev, "kernel ctx is still alive\n");
+ hl_debugfs_remove_device(hdev);
+
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index c9a12980218a..09706c571e95 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -90,9 +90,10 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
u16 len, u32 timeout, u64 *result)
{
+ struct hl_hw_queue *queue = &hdev->kernel_queues[hw_queue_id];
struct cpucp_packet *pkt;
dma_addr_t pkt_dma_addr;
- u32 tmp;
+ u32 tmp, expected_ack_val;
int rc = 0;
pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
@@ -115,14 +116,23 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
goto out;
}
+ /* set fence to an invalid value */
+ pkt->fence = UINT_MAX;
+
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, len, pkt_dma_addr);
if (rc) {
dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
goto out;
}
+ if (hdev->asic_prop.fw_app_security_map &
+ CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
+ expected_ack_val = queue->pi;
+ else
+ expected_ack_val = CPUCP_PACKET_FENCE_VAL;
+
rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
- (tmp == CPUCP_PACKET_FENCE_VAL), 1000,
+ (tmp == expected_ack_val), 1000,
timeout, true);
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
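As an aside on the ack scheme above: when the firmware advertises CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN, it acks a packet by writing the queue's producer index into the fence word; otherwise the fixed CPUCP_PACKET_FENCE_VAL magic is expected. A minimal user-space sketch of the polling semantics (poll_fence and the timing code are illustrative stand-ins, not driver API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-in for hl_poll_timeout_memory(): busy-poll a fence
 * word until it holds the expected ack value or the deadline expires. */
static bool poll_fence(volatile uint32_t *fence_word, uint32_t expected_ack,
                       long timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (*fence_word == expected_ack)
                        return true;    /* firmware acked the packet */
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000 +
                    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
                        return false;   /* timed out, packet never acked */
        }
}

int main(void)
{
        uint32_t fence = UINT32_MAX;    /* invalid initial value, as above */
        uint32_t queue_pi = 7;          /* pretend producer index */

        fence = queue_pi;               /* simulate the firmware PI ack */
        printf("acked: %d\n", poll_fence(&fence, queue_pi, 10));
        return 0;
}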
@@ -279,8 +289,74 @@ int hl_fw_send_heartbeat(struct hl_device *hdev)
return rc;
}
+static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
+ u32 cpu_security_boot_status_reg)
+{
+ u32 err_val, security_val;
+
+ /* Some of the firmware status codes are deprecated in newer f/w
+ * versions. In those versions, the errors are reported
+ * in different registers. Therefore, we need to check those
+ * registers and print the exact errors. Moreover, there
+ * may be multiple errors, so we need to report on each error
+ * separately. Some of the error codes might indicate a state
+ * that is not an error per se, but it is an error in a production
+ * environment
+ */
+ err_val = RREG32(boot_err0_reg);
+ if (!(err_val & CPU_BOOT_ERR0_ENABLED))
+ return 0;
+
+ if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
+ dev_err(hdev->dev,
+ "Device boot error - DRAM initialization failed\n");
+ if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED)
+ dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
+ if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL)
+ dev_err(hdev->dev,
+ "Device boot error - Thermal Sensor initialization failed\n");
+ if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED)
+ dev_warn(hdev->dev,
+ "Device boot warning - Skipped DRAM initialization\n");
+
+ if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) {
+ if (hdev->bmc_enable)
+ dev_warn(hdev->dev,
+ "Device boot error - Skipped waiting for BMC\n");
+ else
+ err_val &= ~CPU_BOOT_ERR0_BMC_WAIT_SKIPPED;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY)
+ dev_err(hdev->dev,
+ "Device boot error - Serdes data from BMC not available\n");
+ if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL)
+ dev_err(hdev->dev,
+ "Device boot error - NIC F/W initialization failed\n");
+ if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY)
+ dev_warn(hdev->dev,
+ "Device boot warning - security not ready\n");
+ if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL)
+ dev_err(hdev->dev, "Device boot error - security failure\n");
+ if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL)
+ dev_err(hdev->dev, "Device boot error - eFuse failure\n");
+ if (err_val & CPU_BOOT_ERR0_PLL_FAIL)
+ dev_err(hdev->dev, "Device boot error - PLL failure\n");
+
+ security_val = RREG32(cpu_security_boot_status_reg);
+ if (security_val & CPU_BOOT_DEV_STS0_ENABLED)
+ dev_dbg(hdev->dev, "Device security status %#x\n",
+ security_val);
+
+ if (err_val & ~CPU_BOOT_ERR0_ENABLED)
+ return -EIO;
+
+ return 0;
+}
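The new return-code-bearing fw_read_errors() walks a fixed set of error bits; the shape of that decode is easy to see in isolation. A small stand-alone sketch (the bit positions and table here are hypothetical; the real CPU_BOOT_ERR0_* values live in the firmware interface headers):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit/message table mirroring the checks above. */
struct boot_err { uint32_t bit; const char *msg; };

static const struct boot_err errs[] = {
        { 1u << 0, "DRAM initialization failed" },
        { 1u << 1, "FIT image corrupted" },
        { 1u << 2, "Thermal Sensor initialization failed" },
};

int main(void)
{
        uint32_t err_val = (1u << 0) | (1u << 2);   /* sample register value */
        size_t i;

        for (i = 0; i < sizeof(errs) / sizeof(errs[0]); i++)
                if (err_val & errs[i].bit)
                        printf("Device boot error - %s\n", errs[i].msg);
        return 0;
}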
+
int hl_fw_cpucp_info_get(struct hl_device *hdev,
- u32 cpu_security_boot_status_reg)
+ u32 cpu_security_boot_status_reg,
+ u32 boot_err0_reg)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct cpucp_packet pkt = {};
@@ -314,6 +390,12 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev,
goto out;
}
+ rc = fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg);
+ if (rc) {
+ dev_err(hdev->dev, "Errors in device boot\n");
+ goto out;
+ }
+
memcpy(&prop->cpucp_info, cpucp_info_cpu_addr,
sizeof(prop->cpucp_info));
@@ -483,58 +565,6 @@ int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index,
return rc;
}
-static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
- u32 cpu_security_boot_status_reg)
-{
- u32 err_val, security_val;
-
- /* Some of the firmware status codes are deprecated in newer f/w
- * versions. In those versions, the errors are reported
- * in different registers. Therefore, we need to check those
- * registers and print the exact errors. Moreover, there
- * may be multiple errors, so we need to report on each error
- * separately. Some of the error codes might indicate a state
- * that is not an error per-se, but it is an error in production
- * environment
- */
- err_val = RREG32(boot_err0_reg);
- if (!(err_val & CPU_BOOT_ERR0_ENABLED))
- return;
-
- if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
- dev_err(hdev->dev,
- "Device boot error - DRAM initialization failed\n");
- if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED)
- dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
- if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL)
- dev_err(hdev->dev,
- "Device boot error - Thermal Sensor initialization failed\n");
- if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED)
- dev_warn(hdev->dev,
- "Device boot warning - Skipped DRAM initialization\n");
- if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED)
- dev_warn(hdev->dev,
- "Device boot error - Skipped waiting for BMC\n");
- if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY)
- dev_err(hdev->dev,
- "Device boot error - Serdes data from BMC not available\n");
- if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL)
- dev_err(hdev->dev,
- "Device boot error - NIC F/W initialization failed\n");
- if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY)
- dev_warn(hdev->dev,
- "Device boot warning - security not ready\n");
- if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL)
- dev_err(hdev->dev, "Device boot error - security failure\n");
- if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL)
- dev_err(hdev->dev, "Device boot error - eFuse failure\n");
-
- security_val = RREG32(cpu_security_boot_status_reg);
- if (security_val & CPU_BOOT_DEV_STS0_ENABLED)
- dev_dbg(hdev->dev, "Device security status %#x\n",
- security_val);
-}
-
static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
{
/* Some of the status codes below are deprecated in newer f/w
@@ -659,6 +689,9 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
prop->fw_security_disabled = true;
}
+ dev_dbg(hdev->dev, "Firmware preboot security status %#x\n",
+ security_status);
+
dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
prop->hard_reset_done_by_fw ? "enabled" : "disabled");
@@ -753,6 +786,10 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
if (prop->fw_boot_cpu_security_map &
CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
prop->hard_reset_done_by_fw = true;
+
+ dev_dbg(hdev->dev,
+ "Firmware boot CPU security status %#x\n",
+ prop->fw_boot_cpu_security_map);
}
dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
@@ -826,6 +863,10 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
goto out;
}
+ rc = fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg);
+ if (rc)
+ return rc;
+
/* Clear reset status since we need to read again from app */
prop->hard_reset_done_by_fw = false;
@@ -837,6 +878,10 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
if (prop->fw_app_security_map &
CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
prop->hard_reset_done_by_fw = true;
+
+ dev_dbg(hdev->dev,
+ "Firmware application CPU security status %#x\n",
+ prop->fw_app_security_map);
}
dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
@@ -844,6 +889,8 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
dev_info(hdev->dev, "Successfully loaded firmware to device\n");
+ return 0;
+
out:
fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg);
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 41af347157e0..d933878b24d1 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -28,17 +28,18 @@
#define HL_NAME "habanalabs"
/* Use upper bits of mmap offset to store habana driver specific information.
- * bits[63:62] - Encode mmap type
+ * bits[63:61] - Encode mmap type
* bits[45:0] - mmap offset value
*
* NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
* defines are w.r.t to PAGE_SIZE
*/
-#define HL_MMAP_TYPE_SHIFT (62 - PAGE_SHIFT)
-#define HL_MMAP_TYPE_MASK (0x3ull << HL_MMAP_TYPE_SHIFT)
+#define HL_MMAP_TYPE_SHIFT (61 - PAGE_SHIFT)
+#define HL_MMAP_TYPE_MASK (0x7ull << HL_MMAP_TYPE_SHIFT)
+#define HL_MMAP_TYPE_BLOCK (0x4ull << HL_MMAP_TYPE_SHIFT)
#define HL_MMAP_TYPE_CB (0x2ull << HL_MMAP_TYPE_SHIFT)
-#define HL_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFull >> PAGE_SHIFT)
+#define HL_MMAP_OFFSET_VALUE_MASK (0x1FFFFFFFFFFFull >> PAGE_SHIFT)
#define HL_MMAP_OFFSET_VALUE_GET(off) (off & HL_MMAP_OFFSET_VALUE_MASK)
#define HL_PENDING_RESET_PER_SEC 10
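Since vm_pgoff is an offset in pages, the type field that conceptually sits at bits[63:61] of the byte offset lands at bits[63-PAGE_SHIFT:61-PAGE_SHIFT] of the page offset, which is what the defines above express. A stand-alone sketch of the packing, assuming PAGE_SHIFT is 12 (4 KB pages); not driver code:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone mirror of the defines above, for a 4 KB PAGE_SIZE. */
#define PAGE_SHIFT                12
#define HL_MMAP_TYPE_SHIFT        (61 - PAGE_SHIFT)
#define HL_MMAP_TYPE_MASK         (0x7ull << HL_MMAP_TYPE_SHIFT)
#define HL_MMAP_TYPE_BLOCK        (0x4ull << HL_MMAP_TYPE_SHIFT)
#define HL_MMAP_TYPE_CB           (0x2ull << HL_MMAP_TYPE_SHIFT)
#define HL_MMAP_OFFSET_VALUE_MASK (0x1FFFFFFFFFFFull >> PAGE_SHIFT)

int main(void)
{
        /* vm_pgoff as the driver would see it for a HW-block mapping */
        uint64_t vm_pgoff = HL_MMAP_TYPE_BLOCK | 0x1234;

        printf("type is block: %d\n",
               (vm_pgoff & HL_MMAP_TYPE_MASK) == HL_MMAP_TYPE_BLOCK);
        printf("offset value:  0x%llx\n",
               (unsigned long long)(vm_pgoff & HL_MMAP_OFFSET_VALUE_MASK));
        return 0;
}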
@@ -408,6 +409,9 @@ struct hl_mmu_properties {
* @sync_stream_first_mon: first monitor available for sync stream use
* @first_available_user_sob: first sob available for the user
* @first_available_user_mon: first monitor available for the user
+ * @first_available_user_msix_interrupt: first available msix interrupt
+ * reserved for the user
+ * @first_available_cq: first available CQ for the user.
* @tpc_enabled_mask: which TPCs are enabled.
* @completion_queues_count: number of completion queues.
* @fw_security_disabled: true if security measures are disabled in firmware,
@@ -416,6 +420,7 @@ struct hl_mmu_properties {
* from BOOT_DEV_STS0
* @dram_supports_virtual_memory: is there an MMU towards the DRAM
* @hard_reset_done_by_fw: true if firmware is handling hard reset flow
+ * @num_functional_hbms: number of functional HBMs in each DCORE.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -468,18 +473,22 @@ struct asic_fixed_properties {
u16 sync_stream_first_mon;
u16 first_available_user_sob[HL_MAX_DCORES];
u16 first_available_user_mon[HL_MAX_DCORES];
+ u16 first_available_user_msix_interrupt;
+ u16 first_available_cq[HL_MAX_DCORES];
u8 tpc_enabled_mask;
u8 completion_queues_count;
u8 fw_security_disabled;
u8 fw_security_status_valid;
u8 dram_supports_virtual_memory;
u8 hard_reset_done_by_fw;
+ u8 num_functional_hbms;
};
/**
* struct hl_fence - software synchronization primitive
* @completion: fence is implemented using completion
* @refcount: refcount for this fence
+ * @cs_sequence: sequence of the corresponding command submission
* @error: mark this fence with error
* @timestamp: timestamp upon completion
*
@@ -487,6 +496,7 @@ struct asic_fixed_properties {
struct hl_fence {
struct completion completion;
struct kref refcount;
+ u64 cs_sequence;
int error;
ktime_t timestamp;
};
@@ -846,6 +856,19 @@ enum div_select_defs {
* @collective_wait_init_cs: Generate collective master/slave packets
* and place them in the relevant cs jobs
* @collective_wait_create_jobs: allocate collective wait cs jobs
+ * @scramble_addr: Routine to scramble the address prior to mapping it
+ * in the MMU.
+ * @descramble_addr: Routine to de-scramble the address prior to
+ * showing it to users.
+ * @ack_protection_bits_errors: ack and dump all security violations
+ * @get_hw_block_id: retrieve a HW block id to be used by the user to mmap it.
+ * Also returns the size of the block if the caller supplies
+ * a valid pointer for it.
+ * @hw_block_mmap: mmap a HW block with a given id.
+ * @enable_events_from_fw: send interrupt to firmware to notify it that the
+ * driver is ready to receive asynchronous events. This
+ * function should be called during the first init and
+ * after every hard-reset of the device
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -918,8 +941,8 @@ struct hl_asic_funcs {
void (*set_clock_gating)(struct hl_device *hdev);
void (*disable_clock_gating)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, void *data);
- bool (*is_device_idle)(struct hl_device *hdev, u64 *mask,
- struct seq_file *s);
+ bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr,
+ u8 mask_len, struct seq_file *s);
int (*soft_reset_late_init)(struct hl_device *hdev);
void (*hw_queues_lock)(struct hl_device *hdev);
void (*hw_queues_unlock)(struct hl_device *hdev);
@@ -955,6 +978,14 @@ struct hl_asic_funcs {
int (*collective_wait_create_jobs)(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
u32 collective_engine_id);
+ u64 (*scramble_addr)(struct hl_device *hdev, u64 addr);
+ u64 (*descramble_addr)(struct hl_device *hdev, u64 addr);
+ void (*ack_protection_bits_errors)(struct hl_device *hdev);
+ int (*get_hw_block_id)(struct hl_device *hdev, u64 block_addr,
+ u32 *block_size, u32 *block_id);
+ int (*hw_block_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
+ u32 block_id, u32 block_size);
+ void (*enable_events_from_fw)(struct hl_device *hdev);
};
@@ -1012,6 +1043,20 @@ struct hl_cs_counters_atomic {
};
/**
+ * struct hl_pending_cb - pending command buffer structure
+ * @cb_node: cb node in pending cb list
+ * @cb: command buffer to send in next submission
+ * @cb_size: command buffer size
+ * @hw_queue_id: destination queue id
+ */
+struct hl_pending_cb {
+ struct list_head cb_node;
+ struct hl_cb *cb;
+ u32 cb_size;
+ u32 hw_queue_id;
+};
+
+/**
* struct hl_ctx - user/kernel context.
* @mem_hash: holds mapping from virtual address to virtual memory area
* descriptor (hl_vm_phys_pg_list or hl_userptr).
@@ -1026,6 +1071,8 @@ struct hl_cs_counters_atomic {
* @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
* MMU hash or walking the PGT requires taking this lock.
* @debugfs_list: node in debugfs list of contexts.
+ * @pending_cb_list: list of pending command buffers waiting to be sent upon
+ * the next user command submission.
* @cs_counters: context command submission counters.
* @cb_va_pool: device VA pool for command buffers which are mapped to the
* device's MMU.
@@ -1034,11 +1081,17 @@ struct hl_cs_counters_atomic {
* index to cs_pending array.
* @dram_default_hops: array that holds all hops addresses needed for default
* DRAM mapping.
+ * @pending_cb_lock: spinlock to protect pending cb list
* @cs_lock: spinlock to protect cs_sequence.
* @dram_phys_mem: amount of used physical DRAM memory by this context.
* @thread_ctx_switch_token: token to prevent multiple threads of the same
* context from running the context switch phase.
* Only a single thread should run it.
+ * @thread_pending_cb_token: token to prevent multiple threads from processing
+ * the pending CB list. Only a single thread should
+ * process the list since it is protected by a
+ * spinlock and we don't want to halt the entire
+ * command submission sequence.
* @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
* the context switch phase from moving to their
* execution phase before the context switch phase
@@ -1057,13 +1110,16 @@ struct hl_ctx {
struct mutex mem_hash_lock;
struct mutex mmu_lock;
struct list_head debugfs_list;
+ struct list_head pending_cb_list;
struct hl_cs_counters_atomic cs_counters;
struct gen_pool *cb_va_pool;
u64 cs_sequence;
u64 *dram_default_hops;
+ spinlock_t pending_cb_lock;
spinlock_t cs_lock;
atomic64_t dram_phys_mem;
atomic_t thread_ctx_switch_token;
+ atomic_t thread_pending_cb_token;
u32 thread_ctx_switch_wait_token;
u32 asid;
u32 handle;
@@ -1124,8 +1180,11 @@ struct hl_userptr {
* @finish_work: workqueue object to run when CS is completed by H/W.
* @work_tdr: delayed work node for TDR.
* @mirror_node : node in device mirror list of command submissions.
+ * @staged_cs_node: node in the staged cs list.
* @debugfs_list: node in debugfs list of command submissions.
* @sequence: the sequence number of this CS.
+ * @staged_sequence: the sequence of the staged submission this CS is part of,
+ * relevant only if staged_cs is set.
* @type: CS_TYPE_*.
* @submitted: true if CS was submitted to H/W.
* @completed: true if CS was completed by device.
@@ -1133,7 +1192,11 @@ struct hl_userptr {
* @tdr_active: true if TDR was activated for this CS (to prevent
* double TDR activation).
* @aborted: true if CS was aborted due to some device error.
- * @timestamp: true if a timestmap must be captured upon completion
+ * @timestamp: true if a timestamp must be captured upon completion.
+ * @staged_last: true if this is the last staged CS and needs completion.
+ * @staged_first: true if this is the first staged CS and we need to set a
+ * timeout for this CS.
+ * @staged_cs: true if this CS is part of a staged submission.
*/
struct hl_cs {
u16 *jobs_in_queue_cnt;
@@ -1146,8 +1209,10 @@ struct hl_cs {
struct work_struct finish_work;
struct delayed_work work_tdr;
struct list_head mirror_node;
+ struct list_head staged_cs_node;
struct list_head debugfs_list;
u64 sequence;
+ u64 staged_sequence;
enum hl_cs_type type;
u8 submitted;
u8 completed;
@@ -1155,6 +1220,9 @@ struct hl_cs {
u8 tdr_active;
u8 aborted;
u8 timestamp;
+ u8 staged_last;
+ u8 staged_first;
+ u8 staged_cs;
};
/**
@@ -1225,6 +1293,7 @@ struct hl_cs_job {
* MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
* have streams so the engine can't be busy by another
* stream.
+ * @completion: true if we need completion for this CS.
*/
struct hl_cs_parser {
struct hl_cb *user_cb;
@@ -1239,6 +1308,7 @@ struct hl_cs_parser {
u8 job_id;
u8 is_kernel_allocated_cb;
u8 contains_dma_pkt;
+ u8 completion;
};
/*
@@ -1688,12 +1758,20 @@ struct hl_mmu_per_hop_info {
* struct hl_mmu_hop_info - A structure describing the TLB hops and their
* hop-entries that were created in order to translate a virtual address to a
* physical one.
+ * @scrambled_vaddr: The value of the virtual address after scrambling. This
+ * address replaces the original virtual-address when mapped
+ * in the MMU tables.
+ * @unscrambled_paddr: The un-scrambled physical address.
* @hop_info: Array holding the per-hop information used for the translation.
* @used_hops: The number of hops used for the translation.
+ * @range_type: virtual address range type.
*/
struct hl_mmu_hop_info {
+ u64 scrambled_vaddr;
+ u64 unscrambled_paddr;
struct hl_mmu_per_hop_info hop_info[MMU_ARCH_5_HOPS];
u32 used_hops;
+ enum hl_va_range_type range_type;
};
/**
@@ -1766,7 +1844,6 @@ struct hl_mmu_funcs {
* @asic_funcs: ASIC specific functions.
* @asic_specific: ASIC specific information to use only from ASIC files.
* @vm: virtual memory manager for MMU.
- * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context.
* @hwmon_dev: H/W monitor device.
* @pm_mng_profile: current power management profile.
* @hl_chip_info: ASIC's sensors information.
@@ -1844,6 +1921,7 @@ struct hl_mmu_funcs {
* user processes
* @device_fini_pending: true if device_fini was called and might be
* waiting for the reset thread to finish
+ * @supports_staged_submission: true if staged submissions are supported
*/
struct hl_device {
struct pci_dev *pdev;
@@ -1881,7 +1959,6 @@ struct hl_device {
const struct hl_asic_funcs *asic_funcs;
void *asic_specific;
struct hl_vm vm;
- struct mutex mmu_cache_lock;
struct device *hwmon_dev;
enum hl_pm_mng_profile pm_mng_profile;
struct hwmon_chip_info *hl_chip_info;
@@ -1950,6 +2027,7 @@ struct hl_device {
u8 needs_reset;
u8 process_kill_trial_cnt;
u8 device_fini_pending;
+ u8 supports_staged_submission;
/* Parameters for bring-up */
u64 nic_ports_mask;
@@ -2067,7 +2145,7 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
int hl_hw_queue_schedule_cs(struct hl_cs *cs);
u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
-void hl_int_hw_queue_update_ci(struct hl_cs *cs);
+void hl_hw_queue_update_ci(struct hl_cs *cs);
void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset);
#define hl_queue_inc_ptr(p) hl_hw_queue_add_ptr(p, 1)
@@ -2123,6 +2201,7 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
bool map_cb, u64 *handle);
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
+int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
u32 handle);
void hl_cb_put(struct hl_cb *cb);
@@ -2136,6 +2215,7 @@ int hl_cb_va_pool_init(struct hl_ctx *ctx);
void hl_cb_va_pool_fini(struct hl_ctx *ctx);
void hl_cs_rollback_all(struct hl_device *hdev);
+void hl_pending_cb_list_flush(struct hl_ctx *ctx);
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
void hl_sob_reset_error(struct kref *ref);
@@ -2143,6 +2223,10 @@ int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask);
void hl_fence_put(struct hl_fence *fence);
void hl_fence_get(struct hl_fence *fence);
void cs_get(struct hl_cs *cs);
+bool cs_needs_completion(struct hl_cs *cs);
+bool cs_needs_timeout(struct hl_cs *cs);
+bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs);
+struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq);
void goya_set_asic_funcs(struct hl_device *hdev);
void gaudi_set_asic_funcs(struct hl_device *hdev);
@@ -2184,6 +2268,8 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops);
+u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr);
+u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr);
bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
@@ -2201,7 +2287,8 @@ void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void *vaddr);
int hl_fw_send_heartbeat(struct hl_device *hdev);
int hl_fw_cpucp_info_get(struct hl_device *hdev,
- u32 cpu_security_boot_status_reg);
+ u32 cpu_security_boot_status_reg,
+ u32 boot_err0_reg);
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
struct hl_info_pci_counters *counters);
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index d25892d61ec9..03af61cecd37 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -57,12 +57,23 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
hw_ip.sram_base_address = prop->sram_user_base_address;
- hw_ip.dram_base_address = prop->dram_user_base_address;
+ hw_ip.dram_base_address =
+ hdev->mmu_enable && prop->dram_supports_virtual_memory ?
+ prop->dmmu.start_addr : prop->dram_user_base_address;
hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
hw_ip.sram_size = prop->sram_size - sram_kmd_size;
- hw_ip.dram_size = prop->dram_size - dram_kmd_size;
+
+ if (hdev->mmu_enable)
+ hw_ip.dram_size =
+ DIV_ROUND_DOWN_ULL(prop->dram_size - dram_kmd_size,
+ prop->dram_page_size) *
+ prop->dram_page_size;
+ else
+ hw_ip.dram_size = prop->dram_size - dram_kmd_size;
+
if (hw_ip.dram_size > PAGE_SIZE)
hw_ip.dram_enabled = 1;
+ hw_ip.dram_page_size = prop->dram_page_size;
hw_ip.num_of_events = prop->num_of_events;
memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
@@ -79,6 +90,8 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;
+ hw_ip.first_available_interrupt_id =
+ prop->first_available_user_msix_interrupt;
return copy_to_user(out, &hw_ip,
min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0;
}
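The rounding above matters once DRAM pages can be non-power-of-2: the size reported to the user is trimmed down to a whole number of DRAM pages. A worked example with made-up numbers (1000 MB available, 48 MB pages, so 20 pages and 960 MB reported):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t avail_bytes = 1000ull << 20;   /* dram_size - dram_kmd_size */
        uint64_t page_size   = 48ull << 20;     /* non-power-of-2 DRAM page  */

        /* same rounding as DIV_ROUND_DOWN_ULL(...) * prop->dram_page_size */
        uint64_t usable = (avail_bytes / page_size) * page_size;

        printf("usable DRAM: %llu MB\n", (unsigned long long)(usable >> 20));
        return 0;
}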
@@ -132,9 +145,10 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
return -EINVAL;
hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
- &hw_idle.busy_engines_mask_ext, NULL);
+ hw_idle.busy_engines_mask_ext,
+ HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
hw_idle.busy_engines_mask =
- lower_32_bits(hw_idle.busy_engines_mask_ext);
+ lower_32_bits(hw_idle.busy_engines_mask_ext[0]);
return copy_to_user(out, &hw_idle,
min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
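The idle mask grew from a single u64 into an array, while the legacy 32-bit field stays backward compatible by taking the low word of the first element. A trivial sketch of that extraction (mask values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* stand-in for busy_engines_mask_ext[] after is_device_idle() */
        uint64_t mask_ext[2] = { 0xdeadbeefcafef00dull, 0x1ull };

        /* legacy field keeps only the lower 32 bits of element 0 */
        uint32_t legacy = (uint32_t)(mask_ext[0] & 0xffffffffull);

        printf("legacy mask: 0x%x\n", legacy);
        return 0;
}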
@@ -383,7 +397,8 @@ static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
prop->first_available_user_sob[args->dcore_id];
sm_info.first_available_monitor =
prop->first_available_user_mon[args->dcore_id];
-
+ sm_info.first_available_cq =
+ prop->first_available_cq[args->dcore_id];
return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
sizeof(sm_info))) ? -EFAULT : 0;
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 76217258780a..0f335182267f 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -38,7 +38,7 @@ static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
return (abs(delta) - queue_len);
}
-void hl_int_hw_queue_update_ci(struct hl_cs *cs)
+void hl_hw_queue_update_ci(struct hl_cs *cs)
{
struct hl_device *hdev = cs->ctx->hdev;
struct hl_hw_queue *q;
@@ -53,8 +53,13 @@ void hl_int_hw_queue_update_ci(struct hl_cs *cs)
if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW)
return;
+ /* We must increment CI for every queue that will never get a
+ * completion. There are two scenarios in which this can happen:
+ * 1. All queues of a non-completion CS will never get a completion.
+ * 2. Internal queues never get a completion.
+ */
for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
- if (q->queue_type == QUEUE_TYPE_INT)
+ if (!cs_needs_completion(cs) || q->queue_type == QUEUE_TYPE_INT)
atomic_add(cs->jobs_in_queue_cnt[i], &q->ci);
}
}
@@ -292,6 +297,10 @@ static void ext_queue_schedule_job(struct hl_cs_job *job)
len = job->job_cb_size;
ptr = cb->bus_address;
+ /* Skip completion flow in case this is a non-completion CS */
+ if (!cs_needs_completion(job->cs))
+ goto submit_bd;
+
cq_pkt.data = cpu_to_le32(
((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
& CQ_ENTRY_SHADOW_INDEX_MASK) |
@@ -318,6 +327,7 @@ static void ext_queue_schedule_job(struct hl_cs_job *job)
cq->pi = hl_cq_inc_ptr(cq->pi);
+submit_bd:
ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
@@ -525,6 +535,7 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
struct hl_cs_job *job, *tmp;
struct hl_hw_queue *q;
int rc = 0, i, cq_cnt;
+ bool first_entry;
u32 max_queues;
cntr = &hdev->aggregated_cs_counters;
@@ -548,7 +559,9 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
switch (q->queue_type) {
case QUEUE_TYPE_EXT:
rc = ext_queue_sanity_checks(hdev, q,
- cs->jobs_in_queue_cnt[i], true);
+ cs->jobs_in_queue_cnt[i],
+ cs_needs_completion(cs) ?
+ true : false);
break;
case QUEUE_TYPE_INT:
rc = int_queue_sanity_checks(hdev, q,
@@ -583,12 +596,38 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
hdev->asic_funcs->collective_wait_init_cs(cs);
spin_lock(&hdev->cs_mirror_lock);
+
+ /* Verify staged CS exists and add to the staged list */
+ if (cs->staged_cs && !cs->staged_first) {
+ struct hl_cs *staged_cs;
+
+ staged_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
+ if (!staged_cs) {
+ dev_err(hdev->dev,
+ "Cannot find staged submission sequence %llu",
+ cs->staged_sequence);
+ rc = -EINVAL;
+ goto unlock_cs_mirror;
+ }
+
+ if (is_staged_cs_last_exists(hdev, staged_cs)) {
+ dev_err(hdev->dev,
+ "Staged submission sequence %llu already submitted",
+ cs->staged_sequence);
+ rc = -EINVAL;
+ goto unlock_cs_mirror;
+ }
+
+ list_add_tail(&cs->staged_cs_node, &staged_cs->staged_cs_node);
+ }
+
list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list);
/* Queue TDR if the CS is the first entry and if timeout is wanted */
+ first_entry = list_first_entry(&hdev->cs_mirror_list,
+ struct hl_cs, mirror_node) == cs;
if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
- (list_first_entry(&hdev->cs_mirror_list,
- struct hl_cs, mirror_node) == cs)) {
+ first_entry && cs_needs_timeout(cs)) {
cs->tdr_active = true;
schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
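To make the staged-submission bookkeeping above concrete: the first CS of a staged sequence anchors a list, each follow-up CS is appended to it, and appending must be refused once a CS flagged staged_last has been linked. A toy sketch of that rule, with plain pointers instead of list_head and no locking (toy_cs/toy_staged_append are illustrative only):

#include <stdbool.h>
#include <stdio.h>

struct toy_cs {
        unsigned long long staged_sequence;
        bool staged_first, staged_last;
        struct toy_cs *next;            /* stands in for staged_cs_node */
};

/* Append cs to the chain anchored at first; refuse once a "last" exists. */
static int toy_staged_append(struct toy_cs *first, struct toy_cs *cs)
{
        struct toy_cs *it = first;

        while (it->next) {
                it = it->next;
                if (it->staged_last)
                        return -1;      /* sequence already closed */
        }
        if (it->staged_last)
                return -1;
        it->next = cs;
        return 0;
}

int main(void)
{
        struct toy_cs first = { .staged_sequence = 42, .staged_first = true };
        struct toy_cs mid   = { .staged_sequence = 42 };
        struct toy_cs last  = { .staged_sequence = 42, .staged_last = true };
        int a, b, c;

        a = toy_staged_append(&first, &mid);
        b = toy_staged_append(&first, &last);
        c = toy_staged_append(&first, &mid);    /* refused: "last" linked */
        printf("%d %d %d\n", a, b, c);          /* expect: 0 0 -1 */
        return 0;
}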
@@ -623,6 +662,8 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
goto out;
+unlock_cs_mirror:
+ spin_unlock(&hdev->cs_mirror_lock);
unroll_cq_resv:
q = &hdev->kernel_queues[0];
for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) {
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 245c0159159f..1f5910517b0e 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -14,6 +14,9 @@
#define HL_MMU_DEBUG 0
+/* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */
+#define DRAM_POOL_PAGE_SIZE SZ_8M
+
/*
* The va ranges in context object contain a list with the available chunks of
* device virtual memory.
@@ -38,15 +41,14 @@
*/
/*
- * alloc_device_memory - allocate device memory
- *
- * @ctx : current context
- * @args : host parameters containing the requested size
- * @ret_handle : result handle
+ * alloc_device_memory() - allocate device memory.
+ * @ctx: pointer to the context structure.
+ * @args: host parameters containing the requested size.
+ * @ret_handle: result handle.
*
* This function does the following:
- * - Allocate the requested size rounded up to 'dram_page_size' pages
- * - Return unique handle
+ * - Allocate the requested size rounded up to 'dram_page_size' pages.
+ * - Return unique handle for later map/unmap/free.
*/
static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
u32 *ret_handle)
@@ -55,15 +57,14 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_pack;
u64 paddr = 0, total_size, num_pgs, i;
- u32 num_curr_pgs, page_size, page_shift;
+ u32 num_curr_pgs, page_size;
int handle, rc;
bool contiguous;
num_curr_pgs = 0;
page_size = hdev->asic_prop.dram_page_size;
- page_shift = __ffs(page_size);
- num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
- total_size = num_pgs << page_shift;
+ num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size);
+ total_size = num_pgs * page_size;
if (!total_size) {
dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
@@ -182,17 +183,17 @@ pages_pack_err:
return rc;
}
-/*
- * dma_map_host_va - DMA mapping of the given host virtual address.
- * @hdev: habanalabs device structure
- * @addr: the host virtual address of the memory area
- * @size: the size of the memory area
- * @p_userptr: pointer to result userptr structure
+/**
+ * dma_map_host_va() - DMA mapping of the given host virtual address.
+ * @hdev: habanalabs device structure.
+ * @addr: the host virtual address of the memory area.
+ * @size: the size of the memory area.
+ * @p_userptr: pointer to result userptr structure.
*
* This function does the following:
- * - Allocate userptr structure
- * - Pin the given host memory using the userptr structure
- * - Perform DMA mapping to have the DMA addresses of the pages
+ * - Allocate userptr structure.
+ * - Pin the given host memory using the userptr structure.
+ * - Perform DMA mapping to have the DMA addresses of the pages.
*/
static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
struct hl_userptr **p_userptr)
@@ -236,14 +237,14 @@ userptr_err:
return rc;
}
-/*
- * dma_unmap_host_va - DMA unmapping of the given host virtual address.
- * @hdev: habanalabs device structure
- * @userptr: userptr to free
+/**
+ * dma_unmap_host_va() - DMA unmapping of the given host virtual address.
+ * @hdev: habanalabs device structure.
+ * @userptr: userptr to free.
*
* This function does the following:
- * - Unpins the physical pages
- * - Frees the userptr structure
+ * - Unpins the physical pages.
+ * - Frees the userptr structure.
*/
static void dma_unmap_host_va(struct hl_device *hdev,
struct hl_userptr *userptr)
@@ -252,14 +253,13 @@ static void dma_unmap_host_va(struct hl_device *hdev,
kfree(userptr);
}
-/*
- * dram_pg_pool_do_release - free DRAM pages pool
- *
- * @ref : pointer to reference object
+/**
+ * dram_pg_pool_do_release() - free DRAM pages pool
+ * @ref: pointer to reference object.
*
* This function does the following:
- * - Frees the idr structure of physical pages handles
- * - Frees the generic pool of DRAM physical pages
+ * - Frees the idr structure of physical pages handles.
+ * - Frees the generic pool of DRAM physical pages.
*/
static void dram_pg_pool_do_release(struct kref *ref)
{
@@ -274,15 +274,15 @@ static void dram_pg_pool_do_release(struct kref *ref)
gen_pool_destroy(vm->dram_pg_pool);
}
-/*
- * free_phys_pg_pack - free physical page pack
- * @hdev: habanalabs device structure
- * @phys_pg_pack: physical page pack to free
+/**
+ * free_phys_pg_pack() - free physical page pack.
+ * @hdev: habanalabs device structure.
+ * @phys_pg_pack: physical page pack to free.
*
* This function does the following:
* - For DRAM memory only, iterate over the pack and free each physical block
- * structure by returning it to the general pool
- * - Free the hl_vm_phys_pg_pack structure
+ * structure by returning it to the general pool.
+ * - Free the hl_vm_phys_pg_pack structure.
*/
static void free_phys_pg_pack(struct hl_device *hdev,
struct hl_vm_phys_pg_pack *phys_pg_pack)
@@ -313,20 +313,20 @@ static void free_phys_pg_pack(struct hl_device *hdev,
kfree(phys_pg_pack);
}
-/*
- * free_device_memory - free device memory
- *
- * @ctx : current context
- * @handle : handle of the memory chunk to free
+/**
+ * free_device_memory() - free device memory.
+ * @ctx: pointer to the context structure.
+ * @args: host parameters containing the requested size.
*
* This function does the following:
- * - Free the device memory related to the given handle
+ * - Free the device memory related to the given handle.
*/
-static int free_device_memory(struct hl_ctx *ctx, u32 handle)
+static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_pack;
+ u32 handle = args->free.handle;
spin_lock(&vm->idr_lock);
phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
@@ -361,16 +361,15 @@ static int free_device_memory(struct hl_ctx *ctx, u32 handle)
return 0;
}
-/*
- * clear_va_list_locked - free virtual addresses list
- *
- * @hdev : habanalabs device structure
- * @va_list : list of virtual addresses to free
+/**
+ * clear_va_list_locked() - free virtual addresses list.
+ * @hdev: habanalabs device structure.
+ * @va_list: list of virtual addresses to free.
*
* This function does the following:
- * - Iterate over the list and free each virtual addresses block
+ * - Iterate over the list and free each virtual addresses block.
*
- * This function should be called only when va_list lock is taken
+ * This function should be called only when va_list lock is taken.
*/
static void clear_va_list_locked(struct hl_device *hdev,
struct list_head *va_list)
@@ -383,16 +382,15 @@ static void clear_va_list_locked(struct hl_device *hdev,
}
}
-/*
- * print_va_list_locked - print virtual addresses list
- *
- * @hdev : habanalabs device structure
- * @va_list : list of virtual addresses to print
+/**
+ * print_va_list_locked() - print virtual addresses list.
+ * @hdev: habanalabs device structure.
+ * @va_list: list of virtual addresses to print.
*
* This function does the following:
- * - Iterate over the list and print each virtual addresses block
+ * - Iterate over the list and print each virtual addresses block.
*
- * This function should be called only when va_list lock is taken
+ * This function should be called only when va_list lock is taken.
*/
static void print_va_list_locked(struct hl_device *hdev,
struct list_head *va_list)
@@ -409,18 +407,17 @@ static void print_va_list_locked(struct hl_device *hdev,
#endif
}
-/*
- * merge_va_blocks_locked - merge a virtual block if possible
- *
- * @hdev : pointer to the habanalabs device structure
- * @va_list : pointer to the virtual addresses block list
- * @va_block : virtual block to merge with adjacent blocks
+/**
+ * merge_va_blocks_locked() - merge a virtual block if possible.
+ * @hdev: pointer to the habanalabs device structure.
+ * @va_list: pointer to the virtual addresses block list.
+ * @va_block: virtual block to merge with adjacent blocks.
*
* This function does the following:
* - Merge the given blocks with the adjacent blocks if their virtual ranges
- * create a contiguous virtual range
+ * create a contiguous virtual range.
*
- * This Function should be called only when va_list lock is taken
+ * This function should be called only when va_list lock is taken.
*/
static void merge_va_blocks_locked(struct hl_device *hdev,
struct list_head *va_list, struct hl_vm_va_block *va_block)
@@ -445,19 +442,18 @@ static void merge_va_blocks_locked(struct hl_device *hdev,
}
}
-/*
- * add_va_block_locked - add a virtual block to the virtual addresses list
- *
- * @hdev : pointer to the habanalabs device structure
- * @va_list : pointer to the virtual addresses block list
- * @start : start virtual address
- * @end : end virtual address
+/**
+ * add_va_block_locked() - add a virtual block to the virtual addresses list.
+ * @hdev: pointer to the habanalabs device structure.
+ * @va_list: pointer to the virtual addresses block list.
+ * @start: start virtual address.
+ * @end: end virtual address.
*
* This function does the following:
- * - Add the given block to the virtual blocks list and merge with other
- * blocks if a contiguous virtual block can be created
+ * - Add the given block to the virtual blocks list and merge with other blocks
+ * if a contiguous virtual block can be created.
*
- * This Function should be called only when va_list lock is taken
+ * This function should be called only when va_list lock is taken.
*/
static int add_va_block_locked(struct hl_device *hdev,
struct list_head *va_list, u64 start, u64 end)
@@ -501,16 +497,15 @@ static int add_va_block_locked(struct hl_device *hdev,
return 0;
}
-/*
- * add_va_block - wrapper for add_va_block_locked
- *
- * @hdev : pointer to the habanalabs device structure
- * @va_list : pointer to the virtual addresses block list
- * @start : start virtual address
- * @end : end virtual address
+/**
+ * add_va_block() - wrapper for add_va_block_locked.
+ * @hdev: pointer to the habanalabs device structure.
+ * @va_list: pointer to the virtual addresses block list.
+ * @start: start virtual address.
+ * @end: end virtual address.
*
* This function does the following:
- * - Takes the list lock and calls add_va_block_locked
+ * - Takes the list lock and calls add_va_block_locked.
*/
static inline int add_va_block(struct hl_device *hdev,
struct hl_va_range *va_range, u64 start, u64 end)
@@ -524,8 +519,9 @@ static inline int add_va_block(struct hl_device *hdev,
return rc;
}
-/*
+/**
* get_va_block() - get a virtual block for the given size and alignment.
+ *
* @hdev: pointer to the habanalabs device structure.
* @va_range: pointer to the virtual addresses range.
* @size: requested block size.
@@ -534,33 +530,51 @@ static inline int add_va_block(struct hl_device *hdev,
*
* This function does the following:
* - Iterate on the virtual block list to find a suitable virtual block for the
- * given size and alignment.
+ * given size, hint address and alignment.
* - Reserve the requested block and update the list.
* - Return the start address of the virtual block.
*/
-static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range,
- u64 size, u64 hint_addr, u32 va_block_align)
+static u64 get_va_block(struct hl_device *hdev,
+ struct hl_va_range *va_range,
+ u64 size, u64 hint_addr, u32 va_block_align)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
- u64 valid_start, valid_size, prev_start, prev_end, align_mask,
- res_valid_start = 0, res_valid_size = 0;
+ u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
+ align_mask, reserved_valid_start = 0, reserved_valid_size = 0;
bool add_prev = false;
+ bool is_align_pow_2 = is_power_of_2(va_range->page_size);
+
+ if (is_align_pow_2)
+ align_mask = ~((u64)va_block_align - 1);
+ else
+ /*
+ * With a non-power-of-2 range we work only at page granularity,
+ * and the start address is page aligned,
+ * so there is no need for alignment checking.
+ */
+ size = DIV_ROUND_UP_ULL(size, va_range->page_size) *
+ va_range->page_size;
- align_mask = ~((u64)va_block_align - 1);
+ tmp_hint_addr = hint_addr;
- /* check if hint_addr is aligned */
- if (hint_addr & (va_block_align - 1))
+ /* Check if we need to ignore hint address */
+ if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
+ (!is_align_pow_2 &&
+ do_div(tmp_hint_addr, va_range->page_size))) {
+ dev_info(hdev->dev, "Hint address 0x%llx will be ignored\n",
+ hint_addr);
hint_addr = 0;
+ }
mutex_lock(&va_range->lock);
print_va_list_locked(hdev, &va_range->list);
list_for_each_entry(va_block, &va_range->list, node) {
- /* calc the first possible aligned addr */
+ /* Calc the first possible aligned addr */
valid_start = va_block->start;
- if (valid_start & (va_block_align - 1)) {
+ if (is_align_pow_2 && (valid_start & (va_block_align - 1))) {
valid_start &= align_mask;
valid_start += va_block_align;
if (valid_start > va_block->end)
@@ -568,35 +582,41 @@ static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range,
}
valid_size = va_block->end - valid_start;
+ if (valid_size < size)
+ continue;
- if (valid_size >= size &&
- (!new_va_block || valid_size < res_valid_size)) {
+ /* Pick the minimal length block which has the required size */
+ if (!new_va_block || (valid_size < reserved_valid_size)) {
new_va_block = va_block;
- res_valid_start = valid_start;
- res_valid_size = valid_size;
+ reserved_valid_start = valid_start;
+ reserved_valid_size = valid_size;
}
if (hint_addr && hint_addr >= valid_start &&
- ((hint_addr + size) <= va_block->end)) {
+ (hint_addr + size) <= va_block->end) {
new_va_block = va_block;
- res_valid_start = hint_addr;
- res_valid_size = valid_size;
+ reserved_valid_start = hint_addr;
+ reserved_valid_size = valid_size;
break;
}
}
if (!new_va_block) {
dev_err(hdev->dev, "no available va block for size %llu\n",
- size);
+ size);
goto out;
}
- if (res_valid_start > new_va_block->start) {
+ /*
+ * Check if there is some leftover range due to reserving the new
+ * va block, then return it to the main virtual addresses list.
+ */
+ if (reserved_valid_start > new_va_block->start) {
prev_start = new_va_block->start;
- prev_end = res_valid_start - 1;
+ prev_end = reserved_valid_start - 1;
- new_va_block->start = res_valid_start;
- new_va_block->size = res_valid_size;
+ new_va_block->start = reserved_valid_start;
+ new_va_block->size = reserved_valid_size;
add_prev = true;
}
@@ -617,7 +637,7 @@ static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range,
out:
mutex_unlock(&va_range->lock);
- return res_valid_start;
+ return reserved_valid_start;
}
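The hint check above has two arms because a non-power-of-2 page size cannot be tested with a mask: the power-of-2 path uses the usual and-mask, while the other path keeps the hint only if it divides evenly by the page size (the driver uses do_div for that). A tiny numeric sketch of the second arm:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t page_size = 48ull << 20;              /* non-power-of-2   */
        uint64_t hint      = (48ull << 20) * 5;        /* page aligned     */
        uint64_t bad_hint  = (48ull << 20) * 5 + 4096; /* not page aligned */

        /* same test as do_div(tmp_hint_addr, va_range->page_size) above */
        printf("hint kept: %d, bad hint kept: %d\n",
               hint % page_size == 0, bad_hint % page_size == 0);
        return 0;
}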
/*
@@ -644,9 +664,9 @@ u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
/**
* hl_get_va_range_type() - get va_range type for the given address and size.
- * @address: The start address of the area we want to validate.
- * @size: The size in bytes of the area we want to validate.
- * @type: returned va_range type
+ * @address: the start address of the area we want to validate.
+ * @size: the size in bytes of the area we want to validate.
+ * @type: returned va_range type.
*
* Return: true if the area is inside a valid range, false otherwise.
*/
@@ -667,16 +687,15 @@ static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
return -EINVAL;
}
-/*
- * hl_unreserve_va_block - wrapper for add_va_block for unreserving a va block
- *
+/**
+ * hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block.
* @hdev: pointer to the habanalabs device structure
- * @ctx: current context
- * @start: start virtual address
- * @end: end virtual address
+ * @ctx: pointer to the context structure.
+ * @start: start virtual address.
+ * @end: end virtual address.
*
* This function does the following:
- * - Takes the list lock and calls add_va_block_locked
+ * - Takes the list lock and calls add_va_block_locked.
*/
int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
u64 start_addr, u64 size)
@@ -701,11 +720,10 @@ int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
return rc;
}
-/*
- * get_sg_info - get number of pages and the DMA address from SG list
- *
- * @sg : the SG list
- * @dma_addr : pointer to DMA address to return
+/**
+ * get_sg_info() - get number of pages and the DMA address from SG list.
+ * @sg: the SG list.
+ * @dma_addr: pointer to DMA address to return.
*
* Calculate the number of consecutive pages described by the SG list. Take the
* offset of the address in the first page, add to it the length and round it up
@@ -719,17 +737,17 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
(PAGE_SIZE - 1)) >> PAGE_SHIFT;
}
-/*
- * init_phys_pg_pack_from_userptr - initialize physical page pack from host
- * memory
- * @ctx: current context
- * @userptr: userptr to initialize from
- * @pphys_pg_pack: result pointer
+/**
+ * init_phys_pg_pack_from_userptr() - initialize physical page pack from host
+ * memory
+ * @ctx: pointer to the context structure.
+ * @userptr: userptr to initialize from.
+ * @pphys_pg_pack: result pointer.
*
* This function does the following:
- * - Pin the physical pages related to the given virtual block
+ * - Pin the physical pages related to the given virtual block.
* - Create a physical page pack from the physical pages related to the given
- * virtual block
+ * virtual block.
*/
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
struct hl_userptr *userptr,
@@ -821,16 +839,16 @@ page_pack_arr_mem_err:
return rc;
}
-/*
- * map_phys_pg_pack - maps the physical page pack.
- * @ctx: current context
- * @vaddr: start address of the virtual area to map from
- * @phys_pg_pack: the pack of physical pages to map to
+/**
+ * map_phys_pg_pack() - maps the physical page pack.
+ * @ctx: pointer to the context structure.
+ * @vaddr: start address of the virtual area to map from.
+ * @phys_pg_pack: the pack of physical pages to map to.
*
* This function does the following:
- * - Maps each chunk of virtual memory to matching physical chunk
- * - Stores number of successful mappings in the given argument
- * - Returns 0 on success, error code otherwise
+ * - Maps each chunk of virtual memory to matching physical chunk.
+ * - Stores number of successful mappings in the given argument.
+ * - Returns 0 on success, error code otherwise.
*/
static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
struct hl_vm_phys_pg_pack *phys_pg_pack)
@@ -875,11 +893,11 @@ err:
return rc;
}
-/*
- * unmap_phys_pg_pack - unmaps the physical page pack
- * @ctx: current context
- * @vaddr: start address of the virtual area to unmap
- * @phys_pg_pack: the pack of physical pages to unmap
+/**
+ * unmap_phys_pg_pack() - unmaps the physical page pack.
+ * @ctx: pointer to the context structure.
+ * @vaddr: start address of the virtual area to unmap.
+ * @phys_pg_pack: the pack of physical pages to unmap.
*/
static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
struct hl_vm_phys_pg_pack *phys_pg_pack)
@@ -913,7 +931,7 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
}
static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
- u64 *paddr)
+ u64 *paddr)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm *vm = &hdev->vm;
@@ -936,19 +954,18 @@ static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
return 0;
}
-/*
- * map_device_va - map the given memory
- *
- * @ctx : current context
- * @args : host parameters with handle/host virtual address
- * @device_addr : pointer to result device virtual address
+/**
+ * map_device_va() - map the given memory.
+ * @ctx: pointer to the context structure.
+ * @args: host parameters with handle/host virtual address.
+ * @device_addr: pointer to result device virtual address.
*
* This function does the following:
* - If given a physical device memory handle, map to a device virtual block
- * and return the start address of this block
+ * and return the start address of this block.
* - If given a host virtual address and size, find the related physical pages,
* map a device virtual block to these pages and return the start address of
- * this block
+ * this block.
*/
static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
u64 *device_addr)
@@ -1034,7 +1051,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
hint_addr = args->map_device.hint_addr;
- /* DRAM VA alignment is the same as the DRAM page size */
+ /* DRAM VA alignment is the same as the MMU page size */
va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
va_block_align = hdev->asic_prop.dmmu.page_size;
}
@@ -1125,24 +1142,26 @@ init_page_pack_err:
return rc;
}
-/*
- * unmap_device_va - unmap the given device virtual address
- *
- * @ctx : current context
- * @vaddr : device virtual address to unmap
- * @ctx_free : true if in context free flow, false otherwise.
+/**
+ * unmap_device_va() - unmap the given device virtual address.
+ * @ctx: pointer to the context structure.
+ * @args: host parameters with device virtual address to unmap.
+ * @ctx_free: true if in context free flow, false otherwise.
*
* This function does the following:
- * - Unmap the physical pages related to the given virtual address
- * - return the device virtual block to the virtual block list
+ * - unmap the physical pages related to the given virtual address.
+ * - return the device virtual block to the virtual block list.
*/
-static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
+static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
+ bool ctx_free)
{
struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
struct hl_vm_hash_node *hnode = NULL;
struct hl_userptr *userptr = NULL;
struct hl_va_range *va_range;
+ u64 vaddr = args->unmap.device_virt_addr;
enum vm_type_t *vm_type;
bool is_userptr;
int rc = 0;
@@ -1201,7 +1220,13 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
goto mapping_cnt_err;
}
- vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
+ if (!is_userptr && !is_power_of_2(phys_pg_pack->page_size))
+ vaddr = prop->dram_base_address +
+ DIV_ROUND_DOWN_ULL(vaddr - prop->dram_base_address,
+ phys_pg_pack->page_size) *
+ phys_pg_pack->page_size;
+ else
+ vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
mutex_lock(&ctx->mmu_lock);
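The round-down above is the unmap-side counterpart: for a non-power-of-2 DRAM page, the page start cannot be recovered with a mask, so the driver divides the offset relative to dram_base_address instead. A numeric sketch with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dram_base = 0x20000000ull;       /* made-up base address */
        uint64_t page_size = 48ull << 20;         /* 48 MB DRAM page      */
        uint64_t vaddr     = dram_base + page_size * 3 + 0x1234;

        /* mask trick only works for powers of 2; divide relative to base */
        uint64_t page_start = dram_base +
                ((vaddr - dram_base) / page_size) * page_size;

        printf("page offset from base: %llu MB\n",     /* expect: 144 */
               (unsigned long long)((page_start - dram_base) >> 20));
        return 0;
}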
@@ -1264,12 +1289,90 @@ vm_type_err:
return rc;
}
+static int map_block(struct hl_device *hdev, u64 address, u64 *handle,
+ u32 *size)
+{
+ u32 block_id = 0;
+ int rc;
+
+ rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);
+
+ *handle = block_id | HL_MMAP_TYPE_BLOCK;
+ *handle <<= PAGE_SHIFT;
+
+ return rc;
+}
+
+static void hw_block_vm_close(struct vm_area_struct *vma)
+{
+ struct hl_ctx *ctx = (struct hl_ctx *) vma->vm_private_data;
+
+ hl_ctx_put(ctx);
+ vma->vm_private_data = NULL;
+}
+
+static const struct vm_operations_struct hw_block_vm_ops = {
+ .close = hw_block_vm_close
+};
+
+/**
+ * hl_hw_block_mmap() - mmap a hw block to user.
+ * @hpriv: pointer to the private data of the fd
+ * @vma: pointer to vm_area_struct of the process
+ *
+ * The driver increments the context reference for every HW block mapped, in
+ * order to prevent the user from closing the FD without unmapping first.
+ */
+int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ u32 block_id, block_size;
+ int rc;
+
+ /* We use the page offset to hold the block id and thus we need to clear
+ * it before doing the mmap itself
+ */
+ block_id = vma->vm_pgoff;
+ vma->vm_pgoff = 0;
+
+ /* Driver only allows mapping of a complete HW block */
+ block_size = vma->vm_end - vma->vm_start;
+
+#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
+ if (!access_ok(VERIFY_WRITE,
+ (void __user *) (uintptr_t) vma->vm_start, block_size)) {
+#else
+ if (!access_ok((void __user *) (uintptr_t) vma->vm_start, block_size)) {
+#endif
+ dev_err(hdev->dev,
+ "user pointer is invalid - 0x%lx\n",
+ vma->vm_start);
+
+ return -EINVAL;
+ }
+
+ vma->vm_ops = &hw_block_vm_ops;
+ vma->vm_private_data = hpriv->ctx;
+
+ hl_ctx_get(hdev, hpriv->ctx);
+
+ rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
+ if (rc) {
+ hl_ctx_put(hpriv->ctx);
+ return rc;
+ }
+
+ vma->vm_pgoff = block_id;
+
+ return 0;
+}
+
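From user space, the block handle returned by HL_MEM_OP_MAP_BLOCK is passed verbatim as the mmap offset, which is how the HL_MMAP_TYPE_BLOCK bits reach hl_mmap() above. A hypothetical sketch of that flow, assuming the HL_IOCTL_MEMORY ioctl and the union hl_mem_args layout from the installed uapi header:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <misc/habanalabs.h>    /* uapi header; install path assumed */

/* Hypothetical flow: fd is an open habanalabs device file and block_addr
 * is a HW block address known to the caller; returns MAP_FAILED on error. */
static void *map_hw_block(int fd, __u64 block_addr)
{
        union hl_mem_args args;

        memset(&args, 0, sizeof(args));
        args.in.op = HL_MEM_OP_MAP_BLOCK;
        args.in.map_block.block_addr = block_addr;

        if (ioctl(fd, HL_IOCTL_MEMORY, &args))
                return MAP_FAILED;

        /* the handle already carries HL_MMAP_TYPE_BLOCK in its upper bits,
         * so it is used as-is for the mmap offset */
        return mmap(NULL, args.out.block_size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, args.out.block_handle);
}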
static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_ctx *ctx = hpriv->ctx;
- u64 device_addr = 0;
- u32 handle = 0;
+ u64 block_handle, device_addr = 0;
+ u32 handle = 0, block_size;
int rc;
switch (args->in.op) {
@@ -1292,7 +1395,7 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
break;
case HL_MEM_OP_FREE:
- rc = free_device_memory(ctx, args->in.free.handle);
+ rc = free_device_memory(ctx, &args->in);
break;
case HL_MEM_OP_MAP:
@@ -1301,7 +1404,7 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
rc = 0;
} else {
rc = get_paddr_from_handle(ctx, &args->in,
- &device_addr);
+ &device_addr);
}
memset(args, 0, sizeof(*args));
@@ -1312,6 +1415,13 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
rc = 0;
break;
+ case HL_MEM_OP_MAP_BLOCK:
+ rc = map_block(hdev, args->in.map_block.block_addr,
+ &block_handle, &block_size);
+ args->out.block_handle = block_handle;
+ args->out.block_size = block_size;
+ break;
+
default:
dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
rc = -ENOTTY;
@@ -1328,8 +1438,8 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
union hl_mem_args *args = data;
struct hl_device *hdev = hpriv->hdev;
struct hl_ctx *ctx = hpriv->ctx;
- u64 device_addr = 0;
- u32 handle = 0;
+ u64 block_handle, device_addr = 0;
+ u32 handle = 0, block_size;
int rc;
if (!hl_device_operational(hdev, &status)) {
@@ -1400,7 +1510,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
goto out;
}
- rc = free_device_memory(ctx, args->in.free.handle);
+ rc = free_device_memory(ctx, &args->in);
break;
case HL_MEM_OP_MAP:
@@ -1411,8 +1521,14 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
break;
case HL_MEM_OP_UNMAP:
- rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
- false);
+ rc = unmap_device_va(ctx, &args->in, false);
+ break;
+
+ case HL_MEM_OP_MAP_BLOCK:
+ rc = map_block(hdev, args->in.map_block.block_addr,
+ &block_handle, &block_size);
+ args->out.block_handle = block_handle;
+ args->out.block_size = block_size;
break;
default:
@@ -1473,16 +1589,16 @@ destroy_pages:
return rc;
}
-/*
- * hl_pin_host_memory - pins a chunk of host memory.
- * @hdev: pointer to the habanalabs device structure
- * @addr: the host virtual address of the memory area
- * @size: the size of the memory area
- * @userptr: pointer to hl_userptr structure
+/**
+ * hl_pin_host_memory() - pins a chunk of host memory.
+ * @hdev: pointer to the habanalabs device structure.
+ * @addr: the host virtual address of the memory area.
+ * @size: the size of the memory area.
+ * @userptr: pointer to hl_userptr structure.
*
* This function does the following:
- * - Pins the physical pages
- * - Create an SG list from those pages
+ * - Pins the physical pages.
+ * - Create an SG list from those pages.
*/
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
struct hl_userptr *userptr)
@@ -1571,11 +1687,10 @@ void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
kfree(userptr->sgt);
}
-/*
- * hl_userptr_delete_list - clear userptr list
- *
- * @hdev : pointer to the habanalabs device structure
- * @userptr_list : pointer to the list to clear
+/**
+ * hl_userptr_delete_list() - clear userptr list.
+ * @hdev: pointer to the habanalabs device structure.
+ * @userptr_list: pointer to the list to clear.
*
* This function does the following:
* - Iterates over the list and unpins the host memory and frees the userptr
@@ -1594,12 +1709,11 @@ void hl_userptr_delete_list(struct hl_device *hdev,
INIT_LIST_HEAD(userptr_list);
}
-/*
- * hl_userptr_is_pinned - returns whether the given userptr is pinned
- *
- * @hdev : pointer to the habanalabs device structure
- * @userptr_list : pointer to the list to clear
- * @userptr : pointer to userptr to check
+/**
+ * hl_userptr_is_pinned() - returns whether the given userptr is pinned.
+ * @hdev: pointer to the habanalabs device structure.
+ * @userptr_list: pointer to the list to search.
+ * @userptr: pointer to userptr to check.
*
* This function does the following:
* - Iterates over the list and checks if the given userptr is in it, means is
@@ -1617,12 +1731,12 @@ bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
return false;
}
-/*
- * va_range_init - initialize virtual addresses range
- * @hdev: pointer to the habanalabs device structure
- * @va_range: pointer to the range to initialize
- * @start: range start address
- * @end: range end address
+/**
+ * va_range_init() - initialize virtual addresses range.
+ * @hdev: pointer to the habanalabs device structure.
+ * @va_range: pointer to the range to initialize.
+ * @start: range start address.
+ * @end: range end address.
*
* This function does the following:
* - Initializes the virtual addresses list of the given range with the given
@@ -1635,15 +1749,21 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
INIT_LIST_HEAD(&va_range->list);
- /* PAGE_SIZE alignment */
+ /*
+ * PAGE_SIZE alignment
+ * it is the caller's responsibility to align the addresses if the
+ * page size is not a power of 2
+ */
- if (start & (PAGE_SIZE - 1)) {
- start &= PAGE_MASK;
- start += PAGE_SIZE;
- }
+ if (is_power_of_2(page_size)) {
+ if (start & (PAGE_SIZE - 1)) {
+ start &= PAGE_MASK;
+ start += PAGE_SIZE;
+ }
- if (end & (PAGE_SIZE - 1))
- end &= PAGE_MASK;
+ if (end & (PAGE_SIZE - 1))
+ end &= PAGE_MASK;
+ }
if (start >= end) {
dev_err(hdev->dev, "too small vm range for va list\n");
@@ -1664,13 +1784,13 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
return 0;
}
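
The power-of-2 branch above is the usual round-up/round-down idiom, written there with PAGE_MASK. Generalized to an arbitrary power-of-2 page_size it is equivalent to the sketch below; with page_size 0x1000, start 0x12345 rounds up to 0x13000 and end 0x2ffff rounds down to 0x2f000:

    /* only valid when page_size is a power of 2 */
    u64 aligned_start = (start + page_size - 1) & ~((u64)page_size - 1);
    u64 aligned_end = end & ~((u64)page_size - 1);
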
-/*
- * va_range_fini() - clear a virtual addresses range
- * @hdev: pointer to the habanalabs structure
- * va_range: pointer to virtual addresses range
+/**
+ * va_range_fini() - clear a virtual addresses range.
+ * @hdev: pointer to the habanalabs structure.
+ * @va_range: pointer to virtual addresses range.
*
* This function does the following:
- * - Frees the virtual addresses block list and its lock
+ * - Frees the virtual addresses block list and its lock.
*/
static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
{
@@ -1682,22 +1802,22 @@ static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
kfree(va_range);
}
-/*
- * vm_ctx_init_with_ranges() - initialize virtual memory for context
- * @ctx: pointer to the habanalabs context structure
+/**
+ * vm_ctx_init_with_ranges() - initialize virtual memory for context.
+ * @ctx: pointer to the habanalabs context structure.
* @host_range_start: host virtual addresses range start.
* @host_range_end: host virtual addresses range end.
* @host_huge_range_start: host virtual addresses range start for memory
- * allocated with huge pages.
+ * allocated with huge pages.
* @host_huge_range_end: host virtual addresses range end for memory allocated
* with huge pages.
* @dram_range_start: dram virtual addresses range start.
* @dram_range_end: dram virtual addresses range end.
*
* This function initializes the following:
- * - MMU for context
- * - Virtual address to area descriptor hashtable
- * - Virtual block list of available virtual memory
+ * - MMU for context.
+ * - Virtual address to area descriptor hashtable.
+ * - Virtual block list of available virtual memory.
*/
static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
u64 host_range_start,
@@ -1818,7 +1938,8 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
dram_range_start = prop->dmmu.start_addr;
dram_range_end = prop->dmmu.end_addr;
- dram_page_size = prop->dmmu.page_size;
+ dram_page_size = prop->dram_page_size ?
+ prop->dram_page_size : prop->dmmu.page_size;
host_range_start = prop->pmmu.start_addr;
host_range_end = prop->pmmu.end_addr;
host_page_size = prop->pmmu.page_size;
@@ -1832,15 +1953,14 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
dram_range_start, dram_range_end, dram_page_size);
}
-/*
- * hl_vm_ctx_fini - virtual memory teardown of context
- *
- * @ctx : pointer to the habanalabs context structure
+/**
+ * hl_vm_ctx_fini() - virtual memory teardown of context.
+ * @ctx: pointer to the habanalabs context structure.
*
* This function performs teardown of the following:
- * - Virtual block list of available virtual memory
- * - Virtual address to area descriptor hashtable
- * - MMU for context
+ * - Virtual block list of available virtual memory.
+ * - Virtual address to area descriptor hashtable.
+ * - MMU for context.
*
* In addition this function does the following:
* - Unmaps the existing hashtable nodes if the hashtable is not empty. The
@@ -1859,9 +1979,10 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
struct hl_vm_phys_pg_pack *phys_pg_list;
struct hl_vm_hash_node *hnode;
struct hlist_node *tmp_node;
+ struct hl_mem_in args;
int i;
- if (!ctx->hdev->mmu_enable)
+ if (!hdev->mmu_enable)
return;
hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
@@ -1878,13 +1999,18 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
dev_dbg(hdev->dev,
"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
hnode->vaddr, ctx->asid);
- unmap_device_va(ctx, hnode->vaddr, true);
+ args.unmap.device_virt_addr = hnode->vaddr;
+ unmap_device_va(ctx, &args, true);
}
+ mutex_lock(&ctx->mmu_lock);
+
/* invalidate the cache once after the unmapping loop */
hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);
+ mutex_unlock(&ctx->mmu_lock);
+
spin_lock(&vm->idr_lock);
idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
if (phys_pg_list->asid == ctx->asid) {
@@ -1911,19 +2037,19 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
* because the user notifies us on allocations. If the user is no more,
* all DRAM is available
*/
- if (!ctx->hdev->asic_prop.dram_supports_virtual_memory)
- atomic64_set(&ctx->hdev->dram_used_mem, 0);
+ if (ctx->asid != HL_KERNEL_ASID_ID &&
+ !hdev->asic_prop.dram_supports_virtual_memory)
+ atomic64_set(&hdev->dram_used_mem, 0);
}
-/*
- * hl_vm_init - initialize virtual memory module
- *
- * @hdev : pointer to the habanalabs device structure
+/**
+ * hl_vm_init() - initialize virtual memory module.
+ * @hdev: pointer to the habanalabs device structure.
*
* This function initializes the following:
- * - MMU module
- * - DRAM physical pages pool of 2MB
- * - Idr for device memory allocation handles
+ * - MMU module.
+ * - DRAM physical pages pool of 2MB.
+ * - Idr for device memory allocation handles.
*/
int hl_vm_init(struct hl_device *hdev)
{
@@ -1931,7 +2057,13 @@ int hl_vm_init(struct hl_device *hdev)
struct hl_vm *vm = &hdev->vm;
int rc;
- vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
+ if (is_power_of_2(prop->dram_page_size))
+ vm->dram_pg_pool =
+ gen_pool_create(__ffs(prop->dram_page_size), -1);
+ else
+ vm->dram_pg_pool =
+ gen_pool_create(__ffs(DRAM_POOL_PAGE_SIZE), -1);
+
if (!vm->dram_pg_pool) {
dev_err(hdev->dev, "Failed to create dram page pool\n");
return -ENOMEM;
@@ -1964,15 +2096,14 @@ pool_add_err:
return rc;
}
-/*
- * hl_vm_fini - virtual memory module teardown
- *
- * @hdev : pointer to the habanalabs device structure
+/**
+ * hl_vm_fini() - virtual memory module teardown.
+ * @hdev: pointer to the habanalabs device structure.
*
* This function performs teardown of the following:
- * - Idr for device memory allocation handles
- * - DRAM physical pages pool of 2MB
- * - MMU module
+ * - Idr for device memory allocation handles.
+ * - DRAM physical pages pool of 2MB.
+ * - MMU module.
*/
void hl_vm_fini(struct hl_device *hdev)
{
diff --git a/drivers/misc/habanalabs/common/mmu/Makefile b/drivers/misc/habanalabs/common/mmu/Makefile
new file mode 100644
index 000000000000..d852c3874658
--- /dev/null
+++ b/drivers/misc/habanalabs/common/mmu/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+HL_COMMON_MMU_FILES := common/mmu/mmu.o common/mmu/mmu_v1.o
diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index 28a4638741d8..71703a32350f 100644
--- a/drivers/misc/habanalabs/common/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -7,7 +7,7 @@
#include <linux/slab.h>
-#include "habanalabs.h"
+#include "../habanalabs.h"
bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
@@ -166,7 +166,6 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
mmu_prop = &prop->pmmu;
pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
-
/*
* The H/W handles mapping of specific page sizes. Hence if the page
* size is bigger, we break it to sub-pages and unmap them separately.
@@ -174,11 +173,21 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
if ((page_size % mmu_prop->page_size) == 0) {
real_page_size = mmu_prop->page_size;
} else {
- dev_err(hdev->dev,
- "page size of %u is not %uKB aligned, can't unmap\n",
- page_size, mmu_prop->page_size >> 10);
+ /*
+ * MMU page size may differ from DRAM page size.
+ * In such case work with the DRAM page size and let the MMU
+ * scrambling routine to handle this mismatch when
+ * calculating the address to remove from the MMU page table
+ */
+ if (is_dram_addr && ((page_size % prop->dram_page_size) == 0)) {
+ real_page_size = prop->dram_page_size;
+ } else {
+ dev_err(hdev->dev,
+ "page size of %u is not %uKB aligned, can't unmap\n",
+ page_size, mmu_prop->page_size >> 10);
- return -EFAULT;
+ return -EFAULT;
+ }
}
npages = page_size / real_page_size;
@@ -253,6 +262,17 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
*/
if ((page_size % mmu_prop->page_size) == 0) {
real_page_size = mmu_prop->page_size;
+ } else if (is_dram_addr && ((page_size % prop->dram_page_size) == 0) &&
+ (prop->dram_page_size < mmu_prop->page_size)) {
+ /*
+ * MMU page size may differ from DRAM page size.
+ * In such a case, work with the DRAM page size and let the MMU
+ * scrambling routine handle this mismatch when calculating
+ * the address to place in the MMU page table. (in that case,
+ * also make sure that the dram_page_size is smaller than the
+ * mmu page size)
+ */
+ real_page_size = prop->dram_page_size;
} else {
dev_err(hdev->dev,
"page size of %u is not %uKB aligned, can't map\n",
@@ -261,9 +281,21 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
return -EFAULT;
}
- WARN_ONCE((phys_addr & (real_page_size - 1)),
- "Mapping 0x%llx with page size of 0x%x is erroneous! Address must be divisible by page size",
- phys_addr, real_page_size);
+ /*
+ * Verify that the phys and virt addresses are aligned with the
+ * MMU page size (in dram this means checking the addresses
+ * after scrambling)
+ */
+ if ((is_dram_addr &&
+ ((hdev->asic_funcs->scramble_addr(hdev, phys_addr) &
+ (mmu_prop->page_size - 1)) ||
+ (hdev->asic_funcs->scramble_addr(hdev, virt_addr) &
+ (mmu_prop->page_size - 1)))) ||
+ (!is_dram_addr && ((phys_addr & (real_page_size - 1)) ||
+ (virt_addr & (real_page_size - 1)))))
+ dev_crit(hdev->dev,
+ "Mapping address 0x%llx with virtual address 0x%llx and page size of 0x%x is erroneous! Addresses must be divisible by page size",
+ phys_addr, virt_addr, real_page_size);
npages = page_size / real_page_size;
real_virt_addr = virt_addr;
@@ -444,19 +476,53 @@ void hl_mmu_swap_in(struct hl_ctx *ctx)
hdev->mmu_func[MMU_HR_PGT].swap_in(ctx);
}
+static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
+ struct hl_mmu_hop_info *hops,
+ u64 *phys_addr)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 offset_mask, addr_mask, hop_shift, tmp_phys_addr;
+ u32 hop0_shift_off;
+ void *p;
+
+ /* last hop holds the phys address and flags */
+ if (hops->unscrambled_paddr)
+ tmp_phys_addr = hops->unscrambled_paddr;
+ else
+ tmp_phys_addr = hops->hop_info[hops->used_hops - 1].hop_pte_val;
+
+ if (hops->range_type == HL_VA_RANGE_TYPE_HOST_HUGE)
+ p = &prop->pmmu_huge;
+ else if (hops->range_type == HL_VA_RANGE_TYPE_HOST)
+ p = &prop->pmmu;
+ else /* HL_VA_RANGE_TYPE_DRAM */
+ p = &prop->dmmu;
+
+ /*
+ * find the correct hop shift field in hl_mmu_properties structure
+ * in order to determine the right mask for the page offset.
+ */
+ hop0_shift_off = offsetof(struct hl_mmu_properties, hop0_shift);
+ p = (char *)p + hop0_shift_off;
+ p = (char *)p + ((hops->used_hops - 1) * sizeof(u64));
+ hop_shift = *(u64 *)p;
+ offset_mask = (1ull << hop_shift) - 1;
+ addr_mask = ~(offset_mask);
+ *phys_addr = (tmp_phys_addr & addr_mask) |
+ (virt_addr & offset_mask);
+}
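
The offsetof() arithmetic above relies on hop0_shift through hop4_shift being laid out as consecutive u64 fields in struct hl_mmu_properties, which lets them be indexed like an array. Assuming that layout, the equivalent direct indexing would be (mmu_prop here standing for the selected pmmu/pmmu_huge/dmmu pointer):

    /* assumes the hopN_shift fields are contiguous u64s */
    u64 *hop_shifts = (u64 *)((char *)mmu_prop +
    		offsetof(struct hl_mmu_properties, hop0_shift));
    u64 hop_shift = hop_shifts[hops->used_hops - 1];
    u64 offset_mask = (1ull << hop_shift) - 1;
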
+
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr)
{
struct hl_mmu_hop_info hops;
- u64 tmp_addr;
int rc;
rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops);
if (rc)
return rc;
- /* last hop holds the phys address and flags */
- tmp_addr = hops.hop_info[hops.used_hops - 1].hop_pte_val;
- *phys_addr = (tmp_addr & HOP_PHYS_ADDR_MASK) | (virt_addr & FLAGS_MASK);
+ hl_mmu_pa_page_with_offset(ctx, virt_addr, &hops, phys_addr);
return 0;
}
@@ -473,6 +539,8 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
if (!hdev->mmu_enable)
return -EOPNOTSUPP;
+ hops->scrambled_vaddr = virt_addr; /* assume no scrambling */
+
is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
prop->dmmu.start_addr,
prop->dmmu.end_addr);
@@ -491,6 +559,11 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
mutex_unlock(&ctx->mmu_lock);
+ /* add page offset to physical address */
+ if (hops->unscrambled_paddr)
+ hl_mmu_pa_page_with_offset(ctx, virt_addr, hops,
+ &hops->unscrambled_paddr);
+
return rc;
}
@@ -512,3 +585,28 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
return 0;
}
+
+/**
+ * hl_mmu_scramble_addr() - The generic mmu address scrambling routine.
+ * @hdev: pointer to device data.
+ * @addr: The address to scramble.
+ *
+ * Return: The scrambled address.
+ */
+u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr)
+{
+ return addr;
+}
+
+/**
+ * hl_mmu_descramble_addr() - The generic mmu address descrambling
+ * routine.
+ * @hdev: pointer to device data.
+ * @addr: The address to descramble.
+ *
+ * Return: The un-scrambled address.
+ */
+u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr)
+{
+ return addr;
+}
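
Both generic routines are deliberately identity functions; an ASIC whose memory controller scrambles physical addresses plugs its own pair into hl_asic_funcs (see the .scramble_addr/.descramble_addr entries added below). A purely hypothetical override, only to illustrate the contract that descramble must invert scramble:

    /* hypothetical ASIC, not a real one: swap address bits 30 and 40 */
    static u64 example_scramble_addr(struct hl_device *hdev, u64 addr)
    {
    	u64 b30 = (addr >> 30) & 1, b40 = (addr >> 40) & 1;

    	addr &= ~((1ull << 30) | (1ull << 40));
    	return addr | (b30 << 40) | (b40 << 30);
    }
    /* a bit swap is self-inverse, so descramble could reuse it */
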
diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
index 06d8a44dd5d4..c5e93ff32586 100644
--- a/drivers/misc/habanalabs/common/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
@@ -5,8 +5,8 @@
* All Rights Reserved.
*/
-#include "habanalabs.h"
-#include "../include/hw_ip/mmu/mmu_general.h"
+#include "../habanalabs.h"
+#include "../../include/hw_ip/mmu/mmu_general.h"
#include <linux/slab.h>
diff --git a/drivers/misc/habanalabs/common/pci/Makefile b/drivers/misc/habanalabs/common/pci/Makefile
new file mode 100644
index 000000000000..dc922a686683
--- /dev/null
+++ b/drivers/misc/habanalabs/common/pci/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+HL_COMMON_PCI_FILES := common/pci/pci.o
diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci/pci.c
index b4725e6101f6..b799f9258fb0 100644
--- a/drivers/misc/habanalabs/common/pci.c
+++ b/drivers/misc/habanalabs/common/pci/pci.c
@@ -5,8 +5,8 @@
* All Rights Reserved.
*/
-#include "habanalabs.h"
-#include "../include/hw_ip/pci/pci_general.h"
+#include "../habanalabs.h"
+#include "../../include/hw_ip/pci/pci_general.h"
#include <linux/pci.h>
@@ -308,40 +308,6 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
}
/**
- * hl_pci_set_dma_mask() - Set DMA masks for the device.
- * @hdev: Pointer to hl_device structure.
- *
- * This function sets the DMA masks (regular and consistent) for a specified
- * value. If it doesn't succeed, it tries to set it to a fall-back value
- *
- * Return: 0 on success, non-zero for failure.
- */
-static int hl_pci_set_dma_mask(struct hl_device *hdev)
-{
- struct pci_dev *pdev = hdev->pdev;
- int rc;
-
- /* set DMA mask */
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
- if (rc) {
- dev_err(hdev->dev,
- "Failed to set pci dma mask to %d bits, error %d\n",
- hdev->dma_mask, rc);
- return rc;
- }
-
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
- if (rc) {
- dev_err(hdev->dev,
- "Failed to set pci consistent dma mask to %d bits, error %d\n",
- hdev->dma_mask, rc);
- return rc;
- }
-
- return 0;
-}
-
-/**
* hl_pci_init() - PCI initialization code.
* @hdev: Pointer to hl_device structure.
*
@@ -377,9 +343,14 @@ int hl_pci_init(struct hl_device *hdev)
goto unmap_pci_bars;
}
- rc = hl_pci_set_dma_mask(hdev);
- if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(hdev->dma_mask));
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to set dma mask to %d bits, error %d\n",
+ hdev->dma_mask, rc);
goto unmap_pci_bars;
+ }
return 0;
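
dma_set_mask_and_coherent() collapses the removed pci_set_dma_mask()/pci_set_consistent_dma_mask() pair into a single call on the generic device. If a fall-back mask were ever wanted again, the conventional pattern would be (a sketch, not part of this patch):

    rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(hdev->dma_mask));
    if (rc)
    	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
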
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index b328ddaa64ee..9152242778f5 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -225,6 +225,12 @@ gaudi_qman_arb_error_cause[GAUDI_NUM_OF_QM_ARB_ERR_CAUSE] = {
"MSG AXI LBW returned with error"
};
+enum gaudi_sm_sei_cause {
+ GAUDI_SM_SEI_SO_OVERFLOW,
+ GAUDI_SM_SEI_LBW_4B_UNALIGNED,
+ GAUDI_SM_SEI_AXI_RESPONSE_ERR
+};
+
static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_0 */
QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_1 */
@@ -354,6 +360,10 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
struct hl_cs_job *job);
static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
u32 size, u64 val);
+static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
+ u32 num_regs, u32 val);
+static int gaudi_schedule_register_memset(struct hl_device *hdev,
+ u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val);
static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
u32 tpc_id);
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev);
@@ -517,6 +527,11 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
prop->sync_stream_first_mon +
(num_sync_stream_queues * HL_RSVD_MONS);
+ prop->first_available_user_msix_interrupt = USHRT_MAX;
+
+ for (i = 0 ; i < HL_MAX_DCORES ; i++)
+ prop->first_available_cq[i] = USHRT_MAX;
+
/* disable fw security for now, set it in a later stage */
prop->fw_security_disabled = true;
prop->fw_security_status_valid = false;
@@ -913,11 +928,17 @@ static void gaudi_sob_group_hw_reset(struct kref *ref)
struct gaudi_hw_sob_group *hw_sob_group =
container_of(ref, struct gaudi_hw_sob_group, kref);
struct hl_device *hdev = hw_sob_group->hdev;
- int i;
+ u64 base_addr;
+ int rc;
- for (i = 0 ; i < NUMBER_OF_SOBS_IN_GRP ; i++)
- WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
- (hw_sob_group->base_sob_id + i) * 4, 0);
+ base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ hw_sob_group->base_sob_id * 4;
+ rc = gaudi_schedule_register_memset(hdev, hw_sob_group->queue_id,
+ base_addr, NUMBER_OF_SOBS_IN_GRP, 0);
+ if (rc)
+ dev_err(hdev->dev,
+ "failed resetting sob group - sob base %u, count %u",
+ hw_sob_group->base_sob_id, NUMBER_OF_SOBS_IN_GRP);
kref_init(&hw_sob_group->kref);
}
@@ -1008,6 +1029,8 @@ static void gaudi_collective_master_init_job(struct hl_device *hdev,
cprop->hw_sob_group[sob_group_offset].base_sob_id;
master_monitor = prop->collective_mstr_mon_id[0];
+ cprop->hw_sob_group[sob_group_offset].queue_id = queue_id;
+
dev_dbg(hdev->dev,
"Generate master wait CBs, sob %d (mask %#x), val:0x%x, mon %u, q %d\n",
master_sob_base, cprop->mstr_sob_mask[0],
@@ -1248,7 +1271,7 @@ static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
u32 queue_id, collective_queue, num_jobs;
u32 stream, nic_queue, nic_idx = 0;
bool skip;
- int i, rc;
+ int i, rc = 0;
/* Verify wait queue id is configured as master */
hw_queue_prop = &hdev->asic_prop.hw_queues_props[wait_queue_id];
@@ -1359,8 +1382,6 @@ static int gaudi_late_init(struct hl_device *hdev)
return rc;
}
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER);
-
rc = gaudi_fetch_psoc_frequency(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to fetch psoc frequency\n");
@@ -1607,6 +1628,7 @@ static int gaudi_sw_init(struct hl_device *hdev)
hdev->supports_sync_stream = true;
hdev->supports_coresight = true;
+ hdev->supports_staged_submission = true;
return 0;
@@ -3438,6 +3460,12 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
enable = !!(hdev->clock_gating_mask &
(BIT_ULL(gaudi_dma_assignment[i])));
+ /* GC sends work to the DMA engine through the Upper CP in DMA5, so
+ * we must not enable clock gating in that DMA
+ */
+ if (i == GAUDI_HBM_DMA_4)
+ enable = 0;
+
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset,
enable ? QMAN_CGM1_PWR_GATE_EN : 0);
@@ -3704,6 +3732,7 @@ static int gaudi_init_cpu(struct hl_device *hdev)
static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
{
struct gaudi_device *gaudi = hdev->asic_specific;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_eq *eq;
u32 status;
struct hl_hw_queue *cpu_pq =
@@ -3760,6 +3789,10 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
return -EIO;
}
+ /* update FW application security bits */
+ if (prop->fw_security_status_valid)
+ prop->fw_app_security_map = RREG32(mmCPU_BOOT_DEV_STS0);
+
gaudi->hw_cap_initialized |= HW_CAP_CPU_Q;
return 0;
}
@@ -4417,9 +4450,12 @@ static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
/* ring the doorbell */
WREG32(db_reg_offset, db_value);
- if (hw_queue_id == GAUDI_QUEUE_ID_CPU_PQ)
+ if (hw_queue_id == GAUDI_QUEUE_ID_CPU_PQ) {
+ /* make sure device CPU will read latest data from host */
+ mb();
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
GAUDI_EVENT_PI_UPDATE);
+ }
}
static void gaudi_pqe_write(struct hl_device *hdev, __le64 *pqe,
@@ -4518,7 +4554,6 @@ static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi_device *gaudi = hdev->asic_specific;
- u64 idle_mask = 0;
int rc = 0;
u64 val = 0;
@@ -4531,8 +4566,8 @@ static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size)
hdev,
mmDMA0_CORE_STS0/* dummy */,
val/* dummy */,
- (hdev->asic_funcs->is_device_idle(hdev,
- &idle_mask, NULL)),
+ (hdev->asic_funcs->is_device_idle(hdev, NULL,
+ 0, NULL)),
1000,
HBM_SCRUBBING_TIMEOUT_US);
if (rc) {
@@ -5060,7 +5095,8 @@ static int gaudi_validate_cb(struct hl_device *hdev,
* 1. A packet that will act as a completion packet
* 2. A packet that will generate MSI-X interrupt
*/
- parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
+ if (parser->completion)
+ parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
return rc;
}
@@ -5287,8 +5323,11 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
* 1. A packet that will act as a completion packet
* 2. A packet that will generate MSI interrupt
*/
- parser->patched_cb_size = parser->user_cb_size +
- sizeof(struct packet_msg_prot) * 2;
+ if (parser->completion)
+ parser->patched_cb_size = parser->user_cb_size +
+ sizeof(struct packet_msg_prot) * 2;
+ else
+ parser->patched_cb_size = parser->user_cb_size;
rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
parser->patched_cb_size, false, false,
@@ -5304,10 +5343,10 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
patched_cb_handle >>= PAGE_SHIFT;
parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
(u32) patched_cb_handle);
- /* hl_cb_get should never fail here so use kernel WARN */
- WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ /* hl_cb_get should never fail */
if (!parser->patched_cb) {
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
rc = -EFAULT;
goto out;
}
@@ -5376,10 +5415,10 @@ static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
patched_cb_handle >>= PAGE_SHIFT;
parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
(u32) patched_cb_handle);
- /* hl_cb_get should never fail here so use kernel WARN */
- WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ /* hl_cb_get should never fail here */
if (!parser->patched_cb) {
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
rc = -EFAULT;
goto out;
}
@@ -5579,31 +5618,206 @@ release_cb:
return rc;
}
-static void gaudi_restore_sm_registers(struct hl_device *hdev)
+static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
+ u32 num_regs, u32 val)
+{
+ struct packet_msg_long *pkt;
+ struct hl_cs_job *job;
+ u32 cb_size, ctl;
+ struct hl_cb *cb;
+ int i, rc;
+
+ cb_size = (sizeof(*pkt) * num_regs) + sizeof(struct packet_msg_prot);
+
+ if (cb_size > SZ_2M) {
+ dev_err(hdev->dev, "CB size must be smaller than %uMB", SZ_2M);
+ return -ENOMEM;
+ }
+
+ cb = hl_cb_kernel_create(hdev, cb_size, false);
+ if (!cb)
+ return -EFAULT;
+
+ pkt = cb->kernel_address;
+
+ ctl = FIELD_PREP(GAUDI_PKT_LONG_CTL_OP_MASK, 0); /* write the value */
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_LONG);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
+
+ for (i = 0; i < num_regs ; i++, pkt++) {
+ pkt->ctl = cpu_to_le32(ctl);
+ pkt->value = cpu_to_le32(val);
+ pkt->addr = cpu_to_le64(reg_base + (i * 4));
+ }
+
+ job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
+ if (!job) {
+ dev_err(hdev->dev, "Failed to allocate a new job\n");
+ rc = -ENOMEM;
+ goto release_cb;
+ }
+
+ job->id = 0;
+ job->user_cb = cb;
+ atomic_inc(&job->user_cb->cs_cnt);
+ job->user_cb_size = cb_size;
+ job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
+ job->patched_cb = job->user_cb;
+ job->job_cb_size = cb_size;
+
+ hl_debugfs_add_job(hdev, job);
+
+ rc = gaudi_send_job_on_qman0(hdev, job);
+ hl_debugfs_remove_job(hdev, job);
+ kfree(job);
+ atomic_dec(&cb->cs_cnt);
+
+release_cb:
+ hl_cb_put(cb);
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+
+ return rc;
+}
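
For scale: packet_msg_long and packet_msg_prot are each 16 bytes in the Gaudi packet definitions (an assumption from gaudi_packets.h), so the SZ_2M guard above works out to:

    /* cb_size = 16 * num_regs + 16 <= SZ_2M            */
    /* => num_regs <= (SZ_2M - 16) / 16 = 131071 regs/CB */
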
+
+static int gaudi_schedule_register_memset(struct hl_device *hdev,
+ u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val)
{
+ struct hl_ctx *ctx = hdev->compute_ctx;
+ struct hl_pending_cb *pending_cb;
+ struct packet_msg_long *pkt;
+ u32 cb_size, ctl;
+ struct hl_cb *cb;
int i;
- for (i = 0 ; i < NUM_OF_SOB_IN_BLOCK << 2 ; i += 4) {
- WREG32(mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
- WREG32(mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
- WREG32(mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
+ /* If no compute context is available or the context is going down,
+ * memset the registers directly
+ */
+ if (!ctx || kref_read(&ctx->refcount) == 0)
+ return gaudi_memset_registers(hdev, reg_base, num_regs, val);
+
+ cb_size = (sizeof(*pkt) * num_regs) +
+ sizeof(struct packet_msg_prot) * 2;
+
+ if (cb_size > SZ_2M) {
+ dev_err(hdev->dev, "CB size must be smaller than %uMB", SZ_2M);
+ return -ENOMEM;
+ }
+
+ pending_cb = kzalloc(sizeof(*pending_cb), GFP_KERNEL);
+ if (!pending_cb)
+ return -ENOMEM;
+
+ cb = hl_cb_kernel_create(hdev, cb_size, false);
+ if (!cb) {
+ kfree(pending_cb);
+ return -EFAULT;
}
- for (i = 0 ; i < NUM_OF_MONITORS_IN_BLOCK << 2 ; i += 4) {
- WREG32(mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
- WREG32(mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
- WREG32(mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
+ pkt = cb->kernel_address;
+
+ ctl = FIELD_PREP(GAUDI_PKT_LONG_CTL_OP_MASK, 0); /* write the value */
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_LONG);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
+
+ for (i = 0; i < num_regs ; i++, pkt++) {
+ pkt->ctl = cpu_to_le32(ctl);
+ pkt->value = cpu_to_le32(val);
+ pkt->addr = cpu_to_le64(reg_base + (i * 4));
}
- i = GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT * 4;
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
- for (; i < NUM_OF_SOB_IN_BLOCK << 2 ; i += 4)
- WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
+ pending_cb->cb = cb;
+ pending_cb->cb_size = cb_size;
+ /* The queue ID MUST be an external queue ID. Otherwise, we will
+ * have undefined behavior
+ */
+ pending_cb->hw_queue_id = hw_queue_id;
- i = GAUDI_FIRST_AVAILABLE_W_S_MONITOR * 4;
+ spin_lock(&ctx->pending_cb_lock);
+ list_add_tail(&pending_cb->cb_node, &ctx->pending_cb_list);
+ spin_unlock(&ctx->pending_cb_lock);
- for (; i < NUM_OF_MONITORS_IN_BLOCK << 2 ; i += 4)
- WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
+ return 0;
+}
+
+static int gaudi_restore_sm_registers(struct hl_device *hdev)
+{
+ u64 base_addr;
+ u32 num_regs;
+ int rc;
+
+ base_addr = CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0;
+ num_regs = NUM_OF_SOB_IN_BLOCK;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+ return -ENOMEM;
+ }
+
+ base_addr = CFG_BASE + mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0;
+ num_regs = NUM_OF_SOB_IN_BLOCK;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+ return -ENOMEM;
+ }
+
+ base_addr = CFG_BASE + mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0;
+ num_regs = NUM_OF_SOB_IN_BLOCK;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+ return -ENOMEM;
+ }
+
+ base_addr = CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0;
+ num_regs = NUM_OF_MONITORS_IN_BLOCK;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+ return -ENOMEM;
+ }
+
+ base_addr = CFG_BASE + mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0;
+ num_regs = NUM_OF_MONITORS_IN_BLOCK;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+ return -ENOMEM;
+ }
+
+ base_addr = CFG_BASE + mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0;
+ num_regs = NUM_OF_MONITORS_IN_BLOCK;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+ return -ENOMEM;
+ }
+
+ base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ (GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT * 4);
+ num_regs = NUM_OF_SOB_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+ return -ENOMEM;
+ }
+
+ base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 +
+ (GAUDI_FIRST_AVAILABLE_W_S_MONITOR * 4);
+ num_regs = NUM_OF_MONITORS_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_MONITOR;
+ rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
+ if (rc) {
+ dev_err(hdev->dev, "failed resetting SM registers");
+ return -ENOMEM;
+ }
+
+ return 0;
}
static void gaudi_restore_dma_registers(struct hl_device *hdev)
@@ -5660,18 +5874,23 @@ static void gaudi_restore_qm_registers(struct hl_device *hdev)
}
}
-static void gaudi_restore_user_registers(struct hl_device *hdev)
+static int gaudi_restore_user_registers(struct hl_device *hdev)
{
- gaudi_restore_sm_registers(hdev);
+ int rc;
+
+ rc = gaudi_restore_sm_registers(hdev);
+ if (rc)
+ return rc;
+
gaudi_restore_dma_registers(hdev);
gaudi_restore_qm_registers(hdev);
+
+ return 0;
}
static int gaudi_context_switch(struct hl_device *hdev, u32 asid)
{
- gaudi_restore_user_registers(hdev);
-
- return 0;
+ return gaudi_restore_user_registers(hdev);
}
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev)
@@ -5730,8 +5949,6 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
}
if (hbm_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
} else {
rc = -EFAULT;
}
@@ -5777,8 +5994,6 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
}
if (hbm_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
} else {
rc = -EFAULT;
}
@@ -5828,8 +6043,6 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
}
if (hbm_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
} else {
rc = -EFAULT;
}
@@ -5878,8 +6091,6 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
}
if (hbm_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
} else {
rc = -EFAULT;
}
@@ -5924,7 +6135,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
return;
if (asid & ~DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK) {
- WARN(1, "asid %u is too big\n", asid);
+ dev_crit(hdev->dev, "asid %u is too big\n", asid);
return;
}
@@ -6227,7 +6438,7 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
else
timeout = HL_DEVICE_TIMEOUT_USEC;
- if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) {
+ if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) {
dev_err_ratelimited(hdev->dev,
"Can't send driver job on QMAN0 because the device is not idle\n");
return -EBUSY;
@@ -6658,6 +6869,34 @@ static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
}
}
+static void gaudi_print_sm_sei_info(struct hl_device *hdev, u16 event_type,
+ struct hl_eq_sm_sei_data *sei_data)
+{
+ u32 index = event_type - GAUDI_EVENT_DMA_IF_SEI_0;
+
+ switch (sei_data->sei_cause) {
+ case SM_SEI_SO_OVERFLOW:
+ dev_err(hdev->dev,
+ "SM %u SEI Error: SO %u overflow/underflow",
+ index, le32_to_cpu(sei_data->sei_log));
+ break;
+ case SM_SEI_LBW_4B_UNALIGNED:
+ dev_err(hdev->dev,
+ "SM %u SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x",
+ index, le32_to_cpu(sei_data->sei_log));
+ break;
+ case SM_SEI_AXI_RESPONSE_ERR:
+ dev_err(hdev->dev,
+ "SM %u SEI Error: AXI ID %u response error",
+ index, le32_to_cpu(sei_data->sei_log));
+ break;
+ default:
+ dev_err(hdev->dev, "Unknown SM SEI cause %u",
+ le32_to_cpu(sei_data->sei_log));
+ break;
+ }
+}
+
static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
struct hl_eq_ecc_data *ecc_data)
{
@@ -6874,7 +7113,9 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
u32 base, val, val2, wr_par, rd_par, ca_par, derr, serr, type, ch;
int err = 0;
- if (!hdev->asic_prop.fw_security_disabled) {
+ if (hdev->asic_prop.fw_security_status_valid &&
+ (hdev->asic_prop.fw_app_security_map &
+ CPU_BOOT_DEV_STS0_HBM_ECC_EN)) {
if (!hbm_ecc_data) {
dev_err(hdev->dev, "No FW ECC data");
return 0;
@@ -6896,14 +7137,24 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
dev_err(hdev->dev,
- "HBM%d pc%d ECC: TYPE=%d, WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
- device, ch, type, wr_par, rd_par, ca_par, serr, derr);
+ "HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
+ device, ch, wr_par, rd_par, ca_par, serr, derr);
+ dev_err(hdev->dev,
+ "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%u, SEC_CNT=%d, DEC_CNT=%d\n",
+ device, ch, hbm_ecc_data->first_addr, type,
+ hbm_ecc_data->sec_cont_cnt, hbm_ecc_data->sec_cnt,
+ hbm_ecc_data->dec_cnt);
err = 1;
return 0;
}
+ if (!hdev->asic_prop.fw_security_disabled) {
+ dev_info(hdev->dev, "Cannot access MC regs for ECC data while security is enabled\n");
+ return 0;
+ }
+
base = GAUDI_HBM_CFG_BASE + device * GAUDI_HBM_CFG_OFFSET;
for (ch = 0 ; ch < GAUDI_HBM_CHANNELS ; ch++) {
val = RREG32_MASK(base + ch * 0x1000 + 0x06C, 0x0000FFFF);
@@ -7153,6 +7404,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
gaudi_hbm_read_interrupts(hdev,
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
+ hl_fw_unmask_irq(hdev, event_type);
break;
case GAUDI_EVENT_TPC0_DEC:
@@ -7281,6 +7533,13 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
hl_fw_unmask_irq(hdev, event_type);
break;
+ case GAUDI_EVENT_DMA_IF_SEI_0 ... GAUDI_EVENT_DMA_IF_SEI_3:
+ gaudi_print_irq_info(hdev, event_type, false);
+ gaudi_print_sm_sei_info(hdev, event_type,
+ &eq_entry->sm_sei_data);
+ hl_fw_unmask_irq(hdev, event_type);
+ break;
+
case GAUDI_EVENT_FIX_POWER_ENV_S ... GAUDI_EVENT_FIX_THERMAL_ENV_E:
gaudi_print_clk_change_info(hdev, event_type);
hl_fw_unmask_irq(hdev, event_type);
@@ -7330,8 +7589,6 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
else
timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
- mutex_lock(&hdev->mmu_cache_lock);
-
/* L0 & L1 invalidation */
WREG32(mmSTLB_INV_PS, 3);
WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++);
@@ -7347,8 +7604,6 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
WREG32(mmSTLB_INV_SET, 0);
- mutex_unlock(&hdev->mmu_cache_lock);
-
if (rc) {
dev_err_ratelimited(hdev->dev,
"MMU cache invalidation timeout\n");
@@ -7371,8 +7626,6 @@ static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
hdev->hard_reset_pending)
return 0;
- mutex_lock(&hdev->mmu_cache_lock);
-
if (hdev->pldm)
timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
else
@@ -7400,8 +7653,6 @@ static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
1000,
timeout_usec);
- mutex_unlock(&hdev->mmu_cache_lock);
-
if (rc) {
dev_err_ratelimited(hdev->dev,
"MMU cache invalidation timeout\n");
@@ -7463,7 +7714,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
- rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0);
+ rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
if (rc)
return rc;
@@ -7483,13 +7734,14 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
return 0;
}
-static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
- struct seq_file *s)
+static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
+ u8 mask_len, struct seq_file *s)
{
struct gaudi_device *gaudi = hdev->asic_specific;
const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n";
const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n";
const char *nic_fmt = "%-5d%-9s%#-14x%#x\n";
+ unsigned long *mask = (unsigned long *)mask_arr;
u32 qm_glbl_sts0, qm_cgm_sts, dma_core_sts0, tpc_cfg_sts, mme_arch_sts;
bool is_idle = true, is_eng_idle, is_slave;
u64 offset;
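
The idle mask becomes a caller-supplied u64 array treated as a bitmap via set_bit(), apparently to leave room for ASICs with more than 64 engine IDs. A sketch of a caller-side check, assuming mask_len is the array length in u64 entries (the real callers are not shown in this hunk):

    u64 mask_arr[2] = {0};

    if (!hdev->asic_funcs->is_device_idle(hdev, mask_arr,
    				ARRAY_SIZE(mask_arr), NULL)) {
    	unsigned long *mask = (unsigned long *)mask_arr;

    	if (test_bit(GAUDI_ENGINE_ID_TPC_0, mask))
    		dev_info(hdev->dev, "TPC0 is busy\n");
    }
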
@@ -7515,9 +7767,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
IS_DMA_IDLE(dma_core_sts0);
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) <<
- (GAUDI_ENGINE_ID_DMA_0 + dma_id);
+ if (mask && !is_eng_idle)
+ set_bit(GAUDI_ENGINE_ID_DMA_0 + dma_id, mask);
if (s)
seq_printf(s, fmt, dma_id,
is_eng_idle ? "Y" : "N", qm_glbl_sts0,
@@ -7538,9 +7789,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
IS_TPC_IDLE(tpc_cfg_sts);
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) <<
- (GAUDI_ENGINE_ID_TPC_0 + i);
+ if (mask && !is_eng_idle)
+ set_bit(GAUDI_ENGINE_ID_TPC_0 + i, mask);
if (s)
seq_printf(s, fmt, i,
is_eng_idle ? "Y" : "N",
@@ -7567,9 +7817,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) <<
- (GAUDI_ENGINE_ID_MME_0 + i);
+ if (mask && !is_eng_idle)
+ set_bit(GAUDI_ENGINE_ID_MME_0 + i, mask);
if (s) {
if (!is_slave)
seq_printf(s, fmt, i,
@@ -7595,9 +7844,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) <<
- (GAUDI_ENGINE_ID_NIC_0 + port);
+ if (mask && !is_eng_idle)
+ set_bit(GAUDI_ENGINE_ID_NIC_0 + port, mask);
if (s)
seq_printf(s, nic_fmt, port,
is_eng_idle ? "Y" : "N",
@@ -7611,9 +7859,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) <<
- (GAUDI_ENGINE_ID_NIC_0 + port);
+ if (mask && !is_eng_idle)
+ set_bit(GAUDI_ENGINE_ID_NIC_0 + port, mask);
if (s)
seq_printf(s, nic_fmt, port,
is_eng_idle ? "Y" : "N",
@@ -7876,18 +8123,16 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev,
static int gaudi_ctx_init(struct hl_ctx *ctx)
{
+ if (ctx->asid == HL_KERNEL_ASID_ID)
+ return 0;
+
gaudi_mmu_prepare(ctx->hdev, ctx->asid);
return gaudi_internal_cb_pool_init(ctx->hdev, ctx);
}
static void gaudi_ctx_fini(struct hl_ctx *ctx)
{
- struct hl_device *hdev = ctx->hdev;
-
- /* Gaudi will NEVER support more then a single compute context.
- * Therefore, don't clear anything unless it is the compute context
- */
- if (hdev->compute_ctx != ctx)
+ if (ctx->asid == HL_KERNEL_ASID_ID)
return;
gaudi_internal_cb_pool_fini(ctx->hdev, ctx);
@@ -7928,10 +8173,10 @@ static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, sob_id * 4);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, eb);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, eb);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
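
The packet-specific GAUDI_PKT_SHORT_CTL_* masks can be swapped for the common GAUDI_PKT_CTL_* ones because the opcode, EB, RB and MB bits sit at the same positions in every packet's ctl word, which is what makes this substitution safe. FIELD_PREP() itself (linux/bitfield.h) just shifts the value up to the mask's lowest set bit, roughly:

    /* illustrative only; the real macro also type- and range-checks */
    #define MY_FIELD_PREP(mask, val) \
    	(((u64)(val) << __ffs64(mask)) & (mask))
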
@@ -7948,10 +8193,10 @@ static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value,
ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 0); /* last pkt MB */
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 0); /* last pkt MB */
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
@@ -7997,10 +8242,10 @@ static u32 gaudi_add_arm_monitor_pkt(struct hl_device *hdev,
ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, msg_addr_offset);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
@@ -8018,10 +8263,10 @@ static u32 gaudi_add_fence_pkt(struct packet_fence *pkt)
cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK, 1);
cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_ID_MASK, 2);
- ctl = FIELD_PREP(GAUDI_PKT_FENCE_CTL_OPCODE_MASK, PACKET_FENCE);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
+ ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_FENCE);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
pkt->cfg = cpu_to_le32(cfg);
pkt->ctl = cpu_to_le32(ctl);
@@ -8217,12 +8462,16 @@ static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
static void gaudi_reset_sob(struct hl_device *hdev, void *data)
{
struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data;
+ int rc;
dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx,
hw_sob->sob_id);
- WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + hw_sob->sob_id * 4,
- 0);
+ rc = gaudi_schedule_register_memset(hdev, hw_sob->q_idx,
+ CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ hw_sob->sob_id * 4, 1, 0);
+ if (rc)
+ dev_err(hdev->dev, "failed resetting sob %u", hw_sob->sob_id);
kref_init(&hw_sob->kref);
}
@@ -8246,6 +8495,24 @@ static u64 gaudi_get_device_time(struct hl_device *hdev)
return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
}
+static int gaudi_get_hw_block_id(struct hl_device *hdev, u64 block_addr,
+ u32 *block_size, u32 *block_id)
+{
+ return -EPERM;
+}
+
+static int gaudi_block_mmap(struct hl_device *hdev,
+ struct vm_area_struct *vma,
+ u32 block_id, u32 block_size)
+{
+ return -EPERM;
+}
+
+static void gaudi_enable_events_from_fw(struct hl_device *hdev)
+{
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER);
+}
+
static const struct hl_asic_funcs gaudi_funcs = {
.early_init = gaudi_early_init,
.early_fini = gaudi_early_fini,
@@ -8322,7 +8589,13 @@ static const struct hl_asic_funcs gaudi_funcs = {
.set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw,
.get_device_time = gaudi_get_device_time,
.collective_wait_init_cs = gaudi_collective_wait_init_cs,
- .collective_wait_create_jobs = gaudi_collective_wait_create_jobs
+ .collective_wait_create_jobs = gaudi_collective_wait_create_jobs,
+ .scramble_addr = hl_mmu_scramble_addr,
+ .descramble_addr = hl_mmu_descramble_addr,
+ .ack_protection_bits_errors = gaudi_ack_protection_bits_errors,
+ .get_hw_block_id = gaudi_get_hw_block_id,
+ .hw_block_mmap = gaudi_block_mmap,
+ .enable_events_from_fw = gaudi_enable_events_from_fw
};
/**
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index a7ab2d7e57d4..50bb4ad570fd 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -251,11 +251,13 @@ enum gaudi_nic_mask {
* @hdev: habanalabs device structure.
* @kref: refcount of this SOB group. group will reset once refcount is zero.
* @base_sob_id: base sob id of this SOB group.
+ * @queue_id: id of the queue that waits on this sob group.
*/
struct gaudi_hw_sob_group {
struct hl_device *hdev;
struct kref kref;
u32 base_sob_id;
+ u32 queue_id;
};
#define NUM_SOB_GROUPS (HL_RSVD_SOBS * QMAN_STREAMS)
@@ -333,6 +335,7 @@ struct gaudi_device {
};
void gaudi_init_security(struct hl_device *hdev);
+void gaudi_ack_protection_bits_errors(struct hl_device *hdev);
void gaudi_add_device_attr(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index 88a09d42e111..6e56fa1c6c69 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -634,9 +634,21 @@ static int gaudi_config_etr(struct hl_device *hdev,
WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
WREG32(mmPSOC_ETR_MODE, input->sink_mode);
- /* Workaround for H3 #HW-2075 bug: use small data chunks */
- WREG32(mmPSOC_ETR_AXICTL, (is_host ? 0 : 0x700) |
- PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT);
+ if (hdev->asic_prop.fw_security_disabled) {
+ /* make ETR not privileged */
+ val = FIELD_PREP(
+ PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK, 0);
+ /* make ETR non-secured (inverted logic) */
+ val |= FIELD_PREP(
+ PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK, 1);
+ /*
+ * Workaround for H3 #HW-2075 bug: use small data
+ * chunks
+ */
+ val |= FIELD_PREP(PSOC_ETR_AXICTL_WRBURSTLEN_MASK,
+ is_host ? 0 : 7);
+ WREG32(mmPSOC_ETR_AXICTL, val);
+ }
WREG32(mmPSOC_ETR_DBALO,
lower_32_bits(input->buffer_address));
WREG32(mmPSOC_ETR_DBAHI,
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index e10181692d0b..7085f45814ae 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -13052,3 +13052,8 @@ void gaudi_init_security(struct hl_device *hdev)
gaudi_init_protection_bits(hdev);
}
+
+void gaudi_ack_protection_bits_errors(struct hl_device *hdev)
+{
+
+}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 63679a747d2c..ed566c52ccaa 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -455,6 +455,11 @@ int goya_get_fixed_properties(struct hl_device *hdev)
prop->max_pending_cs = GOYA_MAX_PENDING_CS;
+ prop->first_available_user_msix_interrupt = USHRT_MAX;
+
+ for (i = 0 ; i < HL_MAX_DCORES ; i++)
+ prop->first_available_cq[i] = USHRT_MAX;
+
/* disable fw security for now, set it in a later stage */
prop->fw_security_disabled = true;
prop->fw_security_status_valid = false;
@@ -792,9 +797,6 @@ int goya_late_init(struct hl_device *hdev)
return rc;
}
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
- GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
-
return 0;
}
@@ -1186,6 +1188,7 @@ static int goya_stop_external_queues(struct hl_device *hdev)
int goya_init_cpu_queues(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_eq *eq;
u32 status;
struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
@@ -1238,6 +1241,10 @@ int goya_init_cpu_queues(struct hl_device *hdev)
return -EIO;
}
+ /* update FW application security bits */
+ if (prop->fw_security_status_valid)
+ prop->fw_app_security_map = RREG32(mmCPU_BOOT_DEV_STS0);
+
goya->hw_cap_initialized |= HW_CAP_CPU_Q;
return 0;
}
@@ -2804,9 +2811,12 @@ void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
/* ring the doorbell */
WREG32(db_reg_offset, db_value);
- if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ)
+ if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ) {
+ /* make sure device CPU will read latest data from host */
+ mb();
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
GOYA_ASYNC_EVENT_ID_PI_UPDATE);
+ }
}
void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
@@ -2914,7 +2924,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
else
timeout = HL_DEVICE_TIMEOUT_USEC;
- if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) {
+ if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) {
dev_err_ratelimited(hdev->dev,
"Can't send driver job on QMAN0 because the device is not idle\n");
return -EBUSY;
@@ -3876,10 +3886,10 @@ static int goya_parse_cb_mmu(struct hl_device *hdev,
patched_cb_handle >>= PAGE_SHIFT;
parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
(u32) patched_cb_handle);
- /* hl_cb_get should never fail here so use kernel WARN */
- WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ /* hl_cb_get should never fail here */
if (!parser->patched_cb) {
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
rc = -EFAULT;
goto out;
}
@@ -3948,10 +3958,10 @@ static int goya_parse_cb_no_mmu(struct hl_device *hdev,
patched_cb_handle >>= PAGE_SHIFT;
parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
(u32) patched_cb_handle);
- /* hl_cb_get should never fail here so use kernel WARN */
- WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ /* hl_cb_get should never fail here */
if (!parser->patched_cb) {
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
rc = -EFAULT;
goto out;
}
@@ -4122,9 +4132,6 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
if (ddr_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
} else {
rc = -EFAULT;
}
@@ -4178,9 +4185,6 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
if (ddr_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
} else {
rc = -EFAULT;
}
@@ -4223,9 +4227,6 @@ static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
if (ddr_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
} else {
rc = -EFAULT;
}
@@ -4266,9 +4267,6 @@ static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
if (ddr_bar_addr == U64_MAX)
rc = -EIO;
- } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
- *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
} else {
rc = -EFAULT;
}
@@ -4877,8 +4875,6 @@ int goya_context_switch(struct hl_device *hdev, u32 asid)
WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
- goya_mmu_prepare(hdev, asid);
-
goya_clear_sm_regs(hdev);
return 0;
@@ -5044,7 +5040,7 @@ static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
return;
if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
- WARN(1, "asid %u is too big\n", asid);
+ dev_crit(hdev->dev, "asid %u is too big\n", asid);
return;
}
@@ -5073,8 +5069,6 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
else
timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
- mutex_lock(&hdev->mmu_cache_lock);
-
/* L0 & L1 invalidation */
WREG32(mmSTLB_INV_ALL_START, 1);
@@ -5086,8 +5080,6 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
1000,
timeout_usec);
- mutex_unlock(&hdev->mmu_cache_lock);
-
if (rc) {
dev_err_ratelimited(hdev->dev,
"MMU cache invalidation timeout\n");
@@ -5117,8 +5109,6 @@ static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
else
timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
- mutex_lock(&hdev->mmu_cache_lock);
-
/*
* TODO: currently invalidate entire L0 & L1 as in regular hard
* invalidation. Need to apply invalidation of specific cache lines with
@@ -5141,8 +5131,6 @@ static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
1000,
timeout_usec);
- mutex_unlock(&hdev->mmu_cache_lock);
-
if (rc) {
dev_err_ratelimited(hdev->dev,
"MMU cache invalidation timeout\n");
@@ -5172,7 +5160,7 @@ int goya_cpucp_info_get(struct hl_device *hdev)
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
- rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0);
+ rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
if (rc)
return rc;
@@ -5207,11 +5195,12 @@ static void goya_disable_clock_gating(struct hl_device *hdev)
/* clock gating not supported in Goya */
}
-static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask,
- struct seq_file *s)
+static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
+ u8 mask_len, struct seq_file *s)
{
const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n";
const char *dma_fmt = "%-5d%-9s%#-14x%#x\n";
+ unsigned long *mask = (unsigned long *)mask_arr;
u32 qm_glbl_sts0, cmdq_glbl_sts0, dma_core_sts0, tpc_cfg_sts,
mme_arch_sts;
bool is_idle = true, is_eng_idle;
@@ -5231,9 +5220,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask,
IS_DMA_IDLE(dma_core_sts0);
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) <<
- (GOYA_ENGINE_ID_DMA_0 + i);
+ if (mask && !is_eng_idle)
+ set_bit(GOYA_ENGINE_ID_DMA_0 + i, mask);
if (s)
seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, dma_core_sts0);
@@ -5255,9 +5243,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask,
IS_TPC_IDLE(tpc_cfg_sts);
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) <<
- (GOYA_ENGINE_ID_TPC_0 + i);
+ if (mask && !is_eng_idle)
+ set_bit(GOYA_ENGINE_ID_TPC_0 + i, mask);
if (s)
seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
@@ -5276,8 +5263,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask,
IS_MME_IDLE(mme_arch_sts);
is_idle &= is_eng_idle;
- if (mask)
- *mask |= ((u64) !is_eng_idle) << GOYA_ENGINE_ID_MME_0;
+ if (mask && !is_eng_idle)
+ set_bit(GOYA_ENGINE_ID_MME_0, mask);
if (s) {
seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
cmdq_glbl_sts0, mme_arch_sts);
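The idle-mask rework above swaps manual u64 shifting for the kernel bitmap helpers, so the reported mask can grow beyond 64 engines without changing the callback again. A minimal sketch of a caller consuming the new interface; the two-entry array size and the test_bit() readback are illustrative assumptions, not taken from the driver:

	u64 mask_arr[2] = {0};	/* room for all GOYA_ENGINE_ID_* bits */
	bool idle;

	idle = hdev->asic_funcs->is_device_idle(hdev, mask_arr,
						ARRAY_SIZE(mask_arr), NULL);
	if (!idle && test_bit(GOYA_ENGINE_ID_DMA_0,
			      (unsigned long *)mask_arr))
		dev_info(hdev->dev, "DMA engine 0 is busy\n");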
@@ -5321,6 +5308,9 @@ static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
static int goya_ctx_init(struct hl_ctx *ctx)
{
+ if (ctx->asid != HL_KERNEL_ASID_ID)
+ goya_mmu_prepare(ctx->hdev, ctx->asid);
+
return 0;
}
@@ -5399,6 +5389,24 @@ static void goya_ctx_fini(struct hl_ctx *ctx)
}
+static int goya_get_hw_block_id(struct hl_device *hdev, u64 block_addr,
+ u32 *block_size, u32 *block_id)
+{
+ return -EPERM;
+}
+
+static int goya_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
+ u32 block_id, u32 block_size)
+{
+ return -EPERM;
+}
+
+static void goya_enable_events_from_fw(struct hl_device *hdev)
+{
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
+}
+
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
@@ -5475,7 +5483,13 @@ static const struct hl_asic_funcs goya_funcs = {
.set_dma_mask_from_fw = goya_set_dma_mask_from_fw,
.get_device_time = goya_get_device_time,
.collective_wait_init_cs = goya_collective_wait_init_cs,
- .collective_wait_create_jobs = goya_collective_wait_create_jobs
+ .collective_wait_create_jobs = goya_collective_wait_create_jobs,
+ .scramble_addr = hl_mmu_scramble_addr,
+ .descramble_addr = hl_mmu_descramble_addr,
+ .ack_protection_bits_errors = goya_ack_protection_bits_errors,
+ .get_hw_block_id = goya_get_hw_block_id,
+ .hw_block_mmap = goya_block_mmap,
+ .enable_events_from_fw = goya_enable_events_from_fw
};
/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 8b3408211af6..23fe099ed218 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -173,6 +173,7 @@ void goya_init_mme_qmans(struct hl_device *hdev);
void goya_init_tpc_qmans(struct hl_device *hdev);
int goya_init_cpu_queues(struct hl_device *hdev);
void goya_init_security(struct hl_device *hdev);
+void goya_ack_protection_bits_errors(struct hl_device *hdev);
int goya_late_init(struct hl_device *hdev);
void goya_late_fini(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index 6fa03933b438..6b7445cca580 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -434,8 +434,15 @@ static int goya_config_etr(struct hl_device *hdev,
WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
WREG32(mmPSOC_ETR_MODE, input->sink_mode);
- WREG32(mmPSOC_ETR_AXICTL,
- 0x700 | PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT);
+ if (hdev->asic_prop.fw_security_disabled) {
+ /* make ETR not privileged */
+ val = FIELD_PREP(PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK, 0);
+ /* make ETR non-secured (inverted logic) */
+ val |= FIELD_PREP(PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK, 1);
+ /* burst size 8 */
+ val |= FIELD_PREP(PSOC_ETR_AXICTL_WRBURSTLEN_MASK, 7);
+ WREG32(mmPSOC_ETR_AXICTL, val);
+ }
WREG32(mmPSOC_ETR_DBALO,
lower_32_bits(input->buffer_address));
WREG32(mmPSOC_ETR_DBAHI,
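Note the behavioural fix hiding here: the removed expression OR'ed in PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT, which is the shift amount (1) rather than the bit, so it wrote 0x701. With the mask values added further down (0x1, 0x2, 0xF00), the FIELD_PREP() composition yields 0x702 with the non-secure bit actually set:

	#include <linux/bitfield.h>

	u32 val;

	val  = FIELD_PREP(0x1, 0);	/* PROTCTRLBIT0: not privileged */
	val |= FIELD_PREP(0x2, 1);	/* PROTCTRLBIT1: non-secured */
	val |= FIELD_PREP(0xF00, 7);	/* WRBURSTLEN: burst size 8 */
	/* val == 0x702 */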
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c
index 14701836f92b..14c3bae3ccdc 100644
--- a/drivers/misc/habanalabs/goya/goya_security.c
+++ b/drivers/misc/habanalabs/goya/goya_security.c
@@ -3120,3 +3120,8 @@ void goya_init_security(struct hl_device *hdev)
goya_init_protection_bits(hdev);
}
+
+void goya_ack_protection_bits_errors(struct hl_device *hdev)
+{
+
+}
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 00bd9b392f93..b77c1c16c32c 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -58,11 +58,25 @@ struct hl_eq_ecc_data {
__u8 pad[7];
};
+enum hl_sm_sei_cause {
+ SM_SEI_SO_OVERFLOW,
+ SM_SEI_LBW_4B_UNALIGNED,
+ SM_SEI_AXI_RESPONSE_ERR
+};
+
+struct hl_eq_sm_sei_data {
+ __le32 sei_log;
+ /* enum hl_sm_sei_cause */
+ __u8 sei_cause;
+ __u8 pad[3];
+};
+
struct hl_eq_entry {
struct hl_eq_header hdr;
union {
struct hl_eq_ecc_data ecc_data;
struct hl_eq_hbm_ecc_data hbm_ecc_data;
+ struct hl_eq_sm_sei_data sm_sei_data;
__le64 data[7];
};
};
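The new member has to fit inside the existing seven-dword payload union so the event-queue entry layout stays fixed. An illustrative compile-time check, assuming the kernel's static_assert() from <linux/build_bug.h> (not part of the patch):

	/* 4 (sei_log) + 1 (sei_cause) + 3 (pad) = 8 bytes <= 56 */
	static_assert(sizeof(struct hl_eq_sm_sei_data) <= sizeof(__le64[7]));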
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index b637dfd69f6e..e87f5a98e193 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -70,6 +70,9 @@
* checksum. Trying to program image again
* might solve this.
*
+ * CPU_BOOT_ERR0_PLL_FAIL PLL settings failed, meaning that one
+ * of the PLLs remains in REF_CLK
+ *
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
* running FW populates the error
@@ -88,6 +91,7 @@
#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << 9)
#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << 10)
#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << 11)
+#define CPU_BOOT_ERR0_PLL_FAIL (1 << 12)
#define CPU_BOOT_ERR0_ENABLED (1 << 31)
/*
@@ -150,10 +154,22 @@
* CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_SP_SRAM_EN SP SRAM is initialized and available
+ * for use.
+ * Initialized in: preboot
+ *
* CPU_BOOT_DEV_STS0_CLK_GATE_EN Clock Gating enabled.
* FW initialized Clock Gating.
* Initialized in: preboot
*
+ * CPU_BOOT_DEV_STS0_HBM_ECC_EN HBM ECC handling Enabled.
+ * FW handles HBM ECC indications.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN Packets ack value used in the armcpd
+ * is set to the PI counter.
+ * Initialized in: linux
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -175,7 +191,10 @@
#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << 9)
#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << 10)
#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << 11)
+#define CPU_BOOT_DEV_STS0_SP_SRAM_EN (1 << 12)
#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << 13)
+#define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << 14)
+#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << 15)
#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31)
enum cpu_boot_status {
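A hedged sketch of how the new PLL failure bit might be consumed once firmware raises CPU_BOOT_ERR0_ENABLED; the register handle and read helper here are illustrative assumptions:

	u32 err0 = RREG32(cpu_boot_err0_reg);	/* hypothetical register */

	if ((err0 & CPU_BOOT_ERR0_ENABLED) &&
	    (err0 & CPU_BOOT_ERR0_PLL_FAIL))
		dev_err(hdev->dev,
			"PLL settings failed, a PLL remains on REF_CLK\n");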
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
index 9ccba8437ec9..49335e8334b4 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
@@ -212,6 +212,10 @@ enum gaudi_async_event_id {
GAUDI_EVENT_NIC_SEI_2 = 266,
GAUDI_EVENT_NIC_SEI_3 = 267,
GAUDI_EVENT_NIC_SEI_4 = 268,
+ GAUDI_EVENT_DMA_IF_SEI_0 = 277,
+ GAUDI_EVENT_DMA_IF_SEI_1 = 278,
+ GAUDI_EVENT_DMA_IF_SEI_2 = 279,
+ GAUDI_EVENT_DMA_IF_SEI_3 = 280,
GAUDI_EVENT_PCIE_FLR = 290,
GAUDI_EVENT_TPC0_BMON_SPMU = 300,
GAUDI_EVENT_TPC0_KRN_ERR = 301,
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
index b9b90d079e23..b53aeda9a982 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -388,7 +388,10 @@ enum axi_id {
#define RAZWI_INITIATOR_ID_X_Y_TPC6 RAZWI_INITIATOR_ID_X_Y(7, 6)
#define RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5 RAZWI_INITIATOR_ID_X_Y(8, 6)
-#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+#define PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK 0x1
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK 0x2
+#define PSOC_ETR_AXICTL_WRBURSTLEN_MASK 0xF00
/* STLB_CACHE_INV */
#define STLB_CACHE_INV_PRODUCER_INDEX_SHIFT 0
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
index f30f2c0458d7..6e097ace2e96 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
@@ -78,6 +78,9 @@ struct packet_wreg_bulk {
__le64 values[0]; /* data starts here */
};
+#define GAUDI_PKT_LONG_CTL_OP_SHIFT 20
+#define GAUDI_PKT_LONG_CTL_OP_MASK 0x00300000
+
struct packet_msg_long {
__le32 value;
__le32 ctl;
@@ -111,18 +114,6 @@ struct packet_msg_long {
#define GAUDI_PKT_SHORT_CTL_BASE_SHIFT 22
#define GAUDI_PKT_SHORT_CTL_BASE_MASK 0x00C00000
-#define GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT 24
-#define GAUDI_PKT_SHORT_CTL_OPCODE_MASK 0x1F000000
-
-#define GAUDI_PKT_SHORT_CTL_EB_SHIFT 29
-#define GAUDI_PKT_SHORT_CTL_EB_MASK 0x20000000
-
-#define GAUDI_PKT_SHORT_CTL_RB_SHIFT 30
-#define GAUDI_PKT_SHORT_CTL_RB_MASK 0x40000000
-
-#define GAUDI_PKT_SHORT_CTL_MB_SHIFT 31
-#define GAUDI_PKT_SHORT_CTL_MB_MASK 0x80000000
-
struct packet_msg_short {
__le32 value;
__le32 ctl;
@@ -146,18 +137,6 @@ struct packet_msg_prot {
#define GAUDI_PKT_FENCE_CTL_PRED_SHIFT 0
#define GAUDI_PKT_FENCE_CTL_PRED_MASK 0x0000001F
-#define GAUDI_PKT_FENCE_CTL_OPCODE_SHIFT 24
-#define GAUDI_PKT_FENCE_CTL_OPCODE_MASK 0x1F000000
-
-#define GAUDI_PKT_FENCE_CTL_EB_SHIFT 29
-#define GAUDI_PKT_FENCE_CTL_EB_MASK 0x20000000
-
-#define GAUDI_PKT_FENCE_CTL_RB_SHIFT 30
-#define GAUDI_PKT_FENCE_CTL_RB_MASK 0x40000000
-
-#define GAUDI_PKT_FENCE_CTL_MB_SHIFT 31
-#define GAUDI_PKT_FENCE_CTL_MB_MASK 0x80000000
-
struct packet_fence {
__le32 cfg;
__le32 ctl;
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
index 067489bd048e..9ff3cb245580 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
@@ -259,6 +259,9 @@
#define DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
#define DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
-#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+#define PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK 0x1
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK 0x2
+#define PSOC_ETR_AXICTL_WRBURSTLEN_MASK 0xF00
#endif /* ASIC_REG_GOYA_MASKS_H_ */
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 2907db260fba..935acc6bbf3c 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -44,7 +44,8 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag,
bus = cl->dev;
mutex_lock(&bus->device_lock);
- if (bus->dev_state != MEI_DEV_ENABLED) {
+ if (bus->dev_state != MEI_DEV_ENABLED &&
+ bus->dev_state != MEI_DEV_POWERING_DOWN) {
rets = -ENODEV;
goto out;
}
@@ -60,6 +61,13 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag,
goto out;
}
+ if (vtag) {
+ /* Check if vtag is supported by client */
+ rets = mei_cl_vt_support_check(cl);
+ if (rets)
+ goto out;
+ }
+
if (length > mei_cl_mtu(cl)) {
rets = -EFBIG;
goto out;
@@ -128,7 +136,8 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
bus = cl->dev;
mutex_lock(&bus->device_lock);
- if (bus->dev_state != MEI_DEV_ENABLED) {
+ if (bus->dev_state != MEI_DEV_ENABLED &&
+ bus->dev_state != MEI_DEV_POWERING_DOWN) {
rets = -ENODEV;
goto out;
}
@@ -878,22 +887,17 @@ static int mei_cl_device_probe(struct device *dev)
static int mei_cl_device_remove(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
- struct mei_cl_driver *cldrv;
- int ret = 0;
+ struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);
- if (!cldev || !dev->driver)
- return 0;
-
- cldrv = to_mei_cl_driver(dev->driver);
if (cldrv->remove)
- ret = cldrv->remove(cldev);
+ cldrv->remove(cldev);
mei_cldev_unregister_callbacks(cldev);
mei_cl_bus_module_put(cldev);
module_put(THIS_MODULE);
- return ret;
+ return 0;
}
static ssize_t name_show(struct device *dev, struct device_attribute *a,
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index a56d41321f32..4378a9b25848 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
#include <linux/mei.h>
@@ -990,7 +991,8 @@ int mei_cl_disconnect(struct mei_cl *cl)
return 0;
}
- if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
+ dev->dev_state == MEI_DEV_POWER_DOWN) {
cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
mei_cl_set_disconnected(cl);
return 0;
@@ -1737,7 +1739,7 @@ static inline u8 mei_ext_hdr_set_vtag(struct mei_ext_hdr *ext, u8 vtag)
*
* @cb: message callback structure
*
- * Return: a pointer to initialized header
+ * Return: a pointer to initialized header or ERR_PTR on failure
*/
static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
{
@@ -2113,6 +2115,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
case MEI_FOP_DISCONNECT:
case MEI_FOP_NOTIFY_STOP:
case MEI_FOP_NOTIFY_START:
+ case MEI_FOP_DMA_MAP:
+ case MEI_FOP_DMA_UNMAP:
if (waitqueue_active(&cl->wait))
wake_up(&cl->wait);
@@ -2139,3 +2143,286 @@ void mei_cl_all_disconnect(struct mei_device *dev)
list_for_each_entry(cl, &dev->file_list, link)
mei_cl_set_disconnected(cl);
}
+
+static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id)
+{
+ struct mei_cl *cl;
+
+ list_for_each_entry(cl, &dev->file_list, link)
+ if (cl->dma.buffer_id == buffer_id)
+ return cl;
+ return NULL;
+}
+
+/**
+ * mei_cl_irq_dma_map - send client dma map request in irq_thread context
+ *
+ * @cl: client
+ * @cb: callback block.
+ * @cmpl_list: complete list.
+ *
+ * Return: 0 on success and error otherwise.
+ */
+int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct list_head *cmpl_list)
+{
+ struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
+ int ret;
+
+ msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
+ slots = mei_hbuf_empty_slots(dev);
+ if (slots < 0)
+ return -EOVERFLOW;
+
+ if ((u32)slots < msg_slots)
+ return -EMSGSIZE;
+
+ ret = mei_hbm_cl_dma_map_req(dev, cl);
+ if (ret) {
+ cl->status = ret;
+ list_move_tail(&cb->list, cmpl_list);
+ return ret;
+ }
+
+ list_move_tail(&cb->list, &dev->ctrl_rd_list);
+ return 0;
+}
+
+/**
+ * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context
+ *
+ * @cl: client
+ * @cb: callback block.
+ * @cmpl_list: complete list.
+ *
+ * Return: 0 on success and error otherwise.
+ */
+int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct list_head *cmpl_list)
+{
+ struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
+ int ret;
+
+ msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request));
+ slots = mei_hbuf_empty_slots(dev);
+ if (slots < 0)
+ return -EOVERFLOW;
+
+ if ((u32)slots < msg_slots)
+ return -EMSGSIZE;
+
+ ret = mei_hbm_cl_dma_unmap_req(dev, cl);
+ if (ret) {
+ cl->status = ret;
+ list_move_tail(&cb->list, cmpl_list);
+ return ret;
+ }
+
+ list_move_tail(&cb->list, &dev->ctrl_rd_list);
+ return 0;
+}
+
+static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
+{
+ cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
+ &cl->dma.daddr, GFP_KERNEL);
+ if (!cl->dma.vaddr)
+ return -ENOMEM;
+
+ cl->dma.buffer_id = buf_id;
+ cl->dma.size = size;
+
+ return 0;
+}
+
+static void mei_cl_dma_free(struct mei_cl *cl)
+{
+ cl->dma.buffer_id = 0;
+ dmam_free_coherent(cl->dev->dev,
+ cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
+ cl->dma.size = 0;
+ cl->dma.vaddr = NULL;
+ cl->dma.daddr = 0;
+}
+
+/**
+ * mei_cl_dma_alloc_and_map - send client dma map request
+ *
+ * @cl: host client
+ * @fp: pointer to file structure
+ * @buffer_id: id of the mapped buffer
+ * @size: size of the buffer
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return:
+ * * -ENODEV
+ * * -EINVAL
+ * * -EOPNOTSUPP
+ * * -EPROTO
+ * * -ENOMEM
+ */
+int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
+ u8 buffer_id, size_t size)
+{
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (!dev->hbm_f_cd_supported) {
+ cl_dbg(dev, cl, "client dma is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (buffer_id == 0)
+ return -EINVAL;
+
+ if (!mei_cl_is_connected(cl))
+ return -ENODEV;
+
+ if (cl->dma_mapped)
+ return -EPROTO;
+
+ if (mei_cl_dma_map_find(dev, buffer_id)) {
+ cl_dbg(dev, cl, "client dma with id %d is already allocated\n",
+ cl->dma.buffer_id);
+ return -EPROTO;
+ }
+
+ rets = pm_runtime_get(dev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(dev->dev);
+ cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ return rets;
+ }
+
+ rets = mei_cl_dma_alloc(cl, buffer_id, size);
+ if (rets) {
+ pm_runtime_put_noidle(dev->dev);
+ return rets;
+ }
+
+ cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
+ if (!cb) {
+ rets = -ENOMEM;
+ goto out;
+ }
+
+ if (mei_hbuf_acquire(dev)) {
+ if (mei_hbm_cl_dma_map_req(dev, cl)) {
+ rets = -ENODEV;
+ goto out;
+ }
+ list_move_tail(&cb->list, &dev->ctrl_rd_list);
+ }
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(cl->wait,
+ cl->dma_mapped ||
+ cl->status ||
+ !mei_cl_is_connected(cl),
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ mutex_lock(&dev->device_lock);
+
+ if (!cl->dma_mapped && !cl->status)
+ cl->status = -EFAULT;
+
+ rets = cl->status;
+
+out:
+ if (rets)
+ mei_cl_dma_free(cl);
+
+ cl_dbg(dev, cl, "rpm: autosuspend\n");
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ mei_io_cb_free(cb);
+ return rets;
+}
+
+/**
+ * mei_cl_dma_unmap - send client dma unmap request
+ *
+ * @cl: host client
+ * @fp: pointer to file structure
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return: 0 on success and error otherwise.
+ */
+int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
+{
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (!dev->hbm_f_cd_supported) {
+ cl_dbg(dev, cl, "client dma is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!mei_cl_is_connected(cl))
+ return -ENODEV;
+
+ if (!cl->dma_mapped)
+ return -EPROTO;
+
+ rets = pm_runtime_get(dev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(dev->dev);
+ cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ return rets;
+ }
+
+ cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
+ if (!cb) {
+ rets = -ENOMEM;
+ goto out;
+ }
+
+ if (mei_hbuf_acquire(dev)) {
+ if (mei_hbm_cl_dma_unmap_req(dev, cl)) {
+ rets = -ENODEV;
+ goto out;
+ }
+ list_move_tail(&cb->list, &dev->ctrl_rd_list);
+ }
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(cl->wait,
+ !cl->dma_mapped ||
+ cl->status ||
+ !mei_cl_is_connected(cl),
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ mutex_lock(&dev->device_lock);
+
+ if (cl->dma_mapped && !cl->status)
+ cl->status = -EFAULT;
+
+ rets = cl->status;
+
+ if (!rets)
+ mei_cl_dma_free(cl);
+out:
+ cl_dbg(dev, cl, "rpm: autosuspend\n");
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ mei_io_cb_free(cb);
+ return rets;
+}
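Both helpers above follow the same shape: validate state, enqueue a MEI_FOP_DMA_* control callback, fire the HBM request if the host buffer is free, then sleep on cl->wait until the firmware response flips cl->dma_mapped. A hedged usage sketch from a caller's perspective; the buffer id and size are illustrative, and per the Locking: notes dev->device_lock must be held on entry:

	/* illustrative: map a 4 KiB client DMA buffer with id 1, then unmap */
	mutex_lock(&dev->device_lock);
	rets = mei_cl_dma_alloc_and_map(cl, fp, 1, SZ_4K);
	if (!rets) {
		/* cl->dma.vaddr and cl->dma.daddr are now valid */
		rets = mei_cl_dma_unmap(cl, fp);
	}
	mutex_unlock(&dev->device_lock);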
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 9e08a9843bba..b12cdcde9436 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -265,6 +265,14 @@ void mei_cl_notify(struct mei_cl *cl);
void mei_cl_all_disconnect(struct mei_device *dev);
+int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct list_head *cmpl_list);
+int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct list_head *cmpl_list);
+int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
+ u8 buffer_id, size_t size);
+int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp);
+
#define MEI_CL_FMT "cl:host=%02d me=%02d "
#define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl)
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index 3ab1a431d810..1ce61e9e24fc 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -106,6 +106,7 @@ static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused)
seq_printf(m, "\tDR: %01d\n", dev->hbm_f_dr_supported);
seq_printf(m, "\tVT: %01d\n", dev->hbm_f_vt_supported);
seq_printf(m, "\tCAP: %01d\n", dev->hbm_f_cap_supported);
+ seq_printf(m, "\tCD: %01d\n", dev->hbm_f_cd_supported);
}
seq_printf(m, "pg: %s, %s\n",
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 686e8b6a4c55..d0277c7fed10 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -339,7 +339,9 @@ static int mei_hbm_capabilities_req(struct mei_device *dev)
memset(&req, 0, sizeof(req));
req.hbm_cmd = MEI_HBM_CAPABILITIES_REQ_CMD;
if (dev->hbm_f_vt_supported)
- req.capability_requested[0] = HBM_CAP_VT;
+ req.capability_requested[0] |= HBM_CAP_VT;
+ if (dev->hbm_f_cd_supported)
+ req.capability_requested[0] |= HBM_CAP_CD;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
@@ -593,6 +595,117 @@ static void mei_hbm_cl_notify(struct mei_device *dev,
}
/**
+ * mei_hbm_cl_dma_map_req - send client dma map request
+ *
+ * @dev: the device structure
+ * @cl: mei host client
+ *
+ * Return: 0 on success and -EIO on write failure
+ */
+int mei_hbm_cl_dma_map_req(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_client_dma_map_request req;
+ int ret;
+
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
+
+ memset(&req, 0, sizeof(req));
+
+ req.hbm_cmd = MEI_HBM_CLIENT_DMA_MAP_REQ_CMD;
+ req.client_buffer_id = cl->dma.buffer_id;
+ req.address_lsb = lower_32_bits(cl->dma.daddr);
+ req.address_msb = upper_32_bits(cl->dma.daddr);
+ req.size = cl->dma.size;
+
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
+ if (ret)
+ dev_err(dev->dev, "dma map request failed: ret = %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * mei_hbm_cl_dma_unmap_req - send client dma unmap request
+ *
+ * @dev: the device structure
+ * @cl: mei host client
+ *
+ * Return: 0 on success and -EIO on write failure
+ */
+int mei_hbm_cl_dma_unmap_req(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_client_dma_unmap_request req;
+ int ret;
+
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
+
+ memset(&req, 0, sizeof(req));
+
+ req.hbm_cmd = MEI_HBM_CLIENT_DMA_UNMAP_REQ_CMD;
+ req.client_buffer_id = cl->dma.buffer_id;
+
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
+ if (ret)
+ dev_err(dev->dev, "dma unmap request failed: ret = %d\n", ret);
+
+ return ret;
+}
+
+static void mei_hbm_cl_dma_map_res(struct mei_device *dev,
+ struct hbm_client_dma_response *res)
+{
+ struct mei_cl *cl;
+ struct mei_cl_cb *cb, *next;
+
+ cl = NULL;
+ list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) {
+ if (cb->fop_type != MEI_FOP_DMA_MAP)
+ continue;
+ if (!cb->cl->dma.buffer_id || cb->cl->dma_mapped)
+ continue;
+
+ cl = cb->cl;
+ break;
+ }
+ if (!cl)
+ return;
+
+ dev_dbg(dev->dev, "cl dma map result = %d\n", res->status);
+ cl->status = res->status;
+ if (!cl->status)
+ cl->dma_mapped = 1;
+ wake_up(&cl->wait);
+}
+
+static void mei_hbm_cl_dma_unmap_res(struct mei_device *dev,
+ struct hbm_client_dma_response *res)
+{
+ struct mei_cl *cl;
+ struct mei_cl_cb *cb, *next;
+
+ cl = NULL;
+ list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) {
+ if (cb->fop_type != MEI_FOP_DMA_UNMAP)
+ continue;
+ if (!cb->cl->dma.buffer_id || !cb->cl->dma_mapped)
+ continue;
+
+ cl = cb->cl;
+ break;
+ }
+ if (!cl)
+ return;
+
+ dev_dbg(dev->dev, "cl dma unmap result = %d\n", res->status);
+ cl->status = res->status;
+ if (!cl->status)
+ cl->dma_mapped = 0;
+ wake_up(&cl->wait);
+}
+
+/**
* mei_hbm_prop_req - request property for a single client
*
* @dev: the device structure
@@ -1085,6 +1198,13 @@ static void mei_hbm_config_features(struct mei_device *dev)
(dev->version.major_version == HBM_MAJOR_VERSION_CAP &&
dev->version.minor_version >= HBM_MINOR_VERSION_CAP))
dev->hbm_f_cap_supported = 1;
+
+ /* Client DMA Support */
+ dev->hbm_f_cd_supported = 0;
+ if (dev->version.major_version > HBM_MAJOR_VERSION_CD ||
+ (dev->version.major_version == HBM_MAJOR_VERSION_CD &&
+ dev->version.minor_version >= HBM_MINOR_VERSION_CD))
+ dev->hbm_f_cd_supported = 1;
}
/**
@@ -1124,6 +1244,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
struct mei_hbm_cl_cmd *cl_cmd;
struct hbm_client_connect_request *disconnect_req;
struct hbm_flow_control *fctrl;
+ struct hbm_client_dma_response *client_dma_res;
/* read the message to our buffer */
BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf));
@@ -1177,6 +1298,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_STARTING) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ dev_dbg(dev->dev, "hbm: start: on shutdown, ignoring\n");
+ return 0;
+ }
dev_err(dev->dev, "hbm: start: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
@@ -1215,7 +1340,12 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->init_clients_timer = 0;
- if (dev->hbm_state != MEI_HBM_CAP_SETUP) {
+ if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
+ dev->hbm_state != MEI_HBM_CAP_SETUP) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ dev_dbg(dev->dev, "hbm: capabilities response: on shutdown, ignoring\n");
+ return 0;
+ }
dev_err(dev->dev, "hbm: capabilities response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
@@ -1224,6 +1354,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
capability_res = (struct hbm_capability_response *)mei_msg;
if (!(capability_res->capability_granted[0] & HBM_CAP_VT))
dev->hbm_f_vt_supported = 0;
+ if (!(capability_res->capability_granted[0] & HBM_CAP_CD))
+ dev->hbm_f_cd_supported = 0;
if (dev->hbm_f_dr_supported) {
if (mei_dmam_ring_alloc(dev))
@@ -1247,7 +1379,12 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->init_clients_timer = 0;
- if (dev->hbm_state != MEI_HBM_DR_SETUP) {
+ if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
+ dev->hbm_state != MEI_HBM_DR_SETUP) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ dev_dbg(dev->dev, "hbm: dma setup response: on shutdown, ignoring\n");
+ return 0;
+ }
dev_err(dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
@@ -1311,6 +1448,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ dev_dbg(dev->dev, "hbm: properties response: on shutdown, ignoring\n");
+ return 0;
+ }
dev_err(dev->dev, "hbm: properties response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
@@ -1349,6 +1490,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_ENUM_CLIENTS) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ dev_dbg(dev->dev, "hbm: enumeration response: on shutdown, ignoring\n");
+ return 0;
+ }
dev_err(dev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
@@ -1373,7 +1518,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
return -EPROTO;
}
- dev->dev_state = MEI_DEV_POWER_DOWN;
+ mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
dev_info(dev->dev, "hbm: stop response: resetting.\n");
/* force the reset */
return -EPROTO;
@@ -1426,6 +1571,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
mei_hbm_cl_notify(dev, cl_cmd);
break;
+ case MEI_HBM_CLIENT_DMA_MAP_RES_CMD:
+ dev_dbg(dev->dev, "hbm: client dma map response: message received.\n");
+ client_dma_res = (struct hbm_client_dma_response *)mei_msg;
+ mei_hbm_cl_dma_map_res(dev, client_dma_res);
+ break;
+
+ case MEI_HBM_CLIENT_DMA_UNMAP_RES_CMD:
+ dev_dbg(dev->dev, "hbm: client dma unmap response: message received.\n");
+ client_dma_res = (struct hbm_client_dma_response *)mei_msg;
+ mei_hbm_cl_dma_unmap_res(dev, client_dma_res);
+ break;
+
default:
WARN(1, "hbm: wrong command %d\n", mei_msg->hbm_cmd);
return -EPROTO;
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 4d95e38e4ddf..cd5b08ca34b6 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -10,6 +10,7 @@
struct mei_device;
struct mei_msg_hdr;
struct mei_cl;
+struct mei_dma_data;
/**
* enum mei_hbm_state - host bus message protocol state
@@ -51,6 +52,7 @@ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd);
void mei_hbm_pg_resume(struct mei_device *dev);
int mei_hbm_cl_notify_req(struct mei_device *dev,
struct mei_cl *cl, u8 request);
-
+int mei_hbm_cl_dma_map_req(struct mei_device *dev, struct mei_cl *cl);
+int mei_hbm_cl_dma_unmap_req(struct mei_device *dev, struct mei_cl *cl);
#endif /* _MEI_HBM_H_ */
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 3506a3534294..ec2a4fce8581 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -844,16 +844,19 @@ enable_err_exit:
return ret;
}
-static int mei_hdcp_remove(struct mei_cl_device *cldev)
+static void mei_hdcp_remove(struct mei_cl_device *cldev)
{
struct i915_hdcp_comp_master *comp_master =
mei_cldev_get_drvdata(cldev);
+ int ret;
component_master_del(&cldev->dev, &mei_component_master_ops);
kfree(comp_master);
mei_cldev_set_drvdata(cldev, NULL);
- return mei_cldev_disable(cldev);
+ ret = mei_cldev_disable(cldev);
+ if (ret)
+ dev_warn(&cldev->dev, "mei_cldev_disable() failed\n");
}
#define MEI_UUID_HDCP GUID_INIT(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 9cf8d8f60cfe..14be76d4c2e6 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -101,6 +101,11 @@
#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
+#define MEI_DEV_ID_EBG 0x1BE0 /* Emmitsburg WS */
+
+#define MEI_DEV_ID_ADP_S 0x7AE8 /* Alder Lake Point S */
+#define MEI_DEV_ID_ADP_LP 0x7A60 /* Alder Lake Point LP */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index df2fb9520dd8..b10606550613 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -88,6 +88,12 @@
#define HBM_MINOR_VERSION_CAP 2
#define HBM_MAJOR_VERSION_CAP 2
+/*
+ * MEI version with client DMA support
+ */
+#define HBM_MINOR_VERSION_CD 2
+#define HBM_MAJOR_VERSION_CD 2
+
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
/* Host bus message command RESPONSE */
@@ -136,6 +142,12 @@
#define MEI_HBM_CAPABILITIES_REQ_CMD 0x13
#define MEI_HBM_CAPABILITIES_RES_CMD 0x93
+#define MEI_HBM_CLIENT_DMA_MAP_REQ_CMD 0x14
+#define MEI_HBM_CLIENT_DMA_MAP_RES_CMD 0x94
+
+#define MEI_HBM_CLIENT_DMA_UNMAP_REQ_CMD 0x15
+#define MEI_HBM_CLIENT_DMA_UNMAP_RES_CMD 0x95
+
/*
* MEI Stop Reason
* used by hbm_host_stop_request.reason
@@ -648,6 +660,8 @@ struct hbm_dma_ring_ctrl {
/* virtual tag supported */
#define HBM_CAP_VT BIT(0)
+/* client dma supported */
+#define HBM_CAP_CD BIT(2)
/**
* struct hbm_capability_request - capability request from host to fw
@@ -671,4 +685,51 @@ struct hbm_capability_response {
u8 capability_granted[3];
} __packed;
+/**
+ * struct hbm_client_dma_map_request - client dma map request from host to fw
+ *
+ * @hbm_cmd: bus message command header
+ * @client_buffer_id: client buffer id
+ * @reserved: reserved
+ * @address_lsb: DMA address LSB
+ * @address_msb: DMA address MSB
+ * @size: DMA size
+ */
+struct hbm_client_dma_map_request {
+ u8 hbm_cmd;
+ u8 client_buffer_id;
+ u8 reserved[2];
+ u32 address_lsb;
+ u32 address_msb;
+ u32 size;
+} __packed;
+
+/**
+ * struct hbm_client_dma_unmap_request
+ * client dma unmap request from the host to the firmware
+ *
+ * @hbm_cmd: bus message command header
+ * @status: unmap status
+ * @client_buffer_id: client buffer id
+ * @reserved: reserved
+ */
+struct hbm_client_dma_unmap_request {
+ u8 hbm_cmd;
+ u8 status;
+ u8 client_buffer_id;
+ u8 reserved;
+} __packed;
+
+/**
+ * struct hbm_client_dma_response
+ * client dma unmap response from the firmware to the host
+ *
+ * @hbm_cmd: bus message command header
+ * @status: command status
+ */
+struct hbm_client_dma_response {
+ u8 hbm_cmd;
+ u8 status;
+} __packed;
+
#endif
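Since these structures go over the HBM wire, their packed sizes are fixed by the protocol: 16 bytes for the map request, 4 for the unmap request, 2 for the response. Illustrative compile-time checks, assuming the kernel's static_assert() from <linux/build_bug.h> (not part of the patch):

	static_assert(sizeof(struct hbm_client_dma_map_request) == 16);
	static_assert(sizeof(struct hbm_client_dma_unmap_request) == 4);
	static_assert(sizeof(struct hbm_client_dma_response) == 2);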
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index bcee77768b91..5c8cb679b997 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -303,9 +303,12 @@ void mei_stop(struct mei_device *dev)
dev_dbg(dev->dev, "stopping the device.\n");
mutex_lock(&dev->device_lock);
- mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
+ mei_set_devstate(dev, MEI_DEV_POWERING_DOWN);
mutex_unlock(&dev->device_lock);
mei_cl_bus_remove_devices(dev);
+ mutex_lock(&dev->device_lock);
+ mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
+ mutex_unlock(&dev->device_lock);
mei_cancel_work(dev);
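This two-phase teardown pairs with the __mei_cl_send()/__mei_cl_recv() changes above, which now also accept the intermediate state. A sketch of the resulting ordering:

	/* shutdown ordering after this change:
	 *   MEI_DEV_ENABLED
	 *     -> MEI_DEV_POWERING_DOWN    (bus clients may still send/recv)
	 *     -> mei_cl_bus_remove_devices()
	 *     -> MEI_DEV_POWER_DOWN       (all client traffic refused)
	 */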
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 326955b04fda..a98f6b895af7 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -295,12 +295,17 @@ static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
static inline int hdr_is_valid(u32 msg_hdr)
{
struct mei_msg_hdr *mei_hdr;
+ u32 expected_len = 0;
mei_hdr = (struct mei_msg_hdr *)&msg_hdr;
if (!msg_hdr || mei_hdr->reserved)
return -EBADMSG;
- if (mei_hdr->dma_ring && mei_hdr->length != MEI_SLOT_SIZE)
+ if (mei_hdr->dma_ring)
+ expected_len += MEI_SLOT_SIZE;
+ if (mei_hdr->extended)
+ expected_len += MEI_SLOT_SIZE;
+ if (mei_hdr->length < expected_len)
return -EBADMSG;
return 0;
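A worked example of the relaxed validation, assuming MEI_SLOT_SIZE is the 4-byte slot width:

	/* dma_ring=1, extended=0 -> expected_len = 4; the old check demanded
	 *                           length == 4, longer headers now pass
	 * dma_ring=1, extended=1 -> expected_len = 8; any header shorter
	 *                           than 8 is rejected with -EBADMSG
	 */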
@@ -324,6 +329,8 @@ int mei_irq_read_handler(struct mei_device *dev,
struct mei_cl *cl;
int ret;
u32 ext_meta_hdr_u32;
+ u32 hdr_size_left;
+ u32 hdr_size_ext;
int i;
int ext_hdr_end;
@@ -353,6 +360,7 @@ int mei_irq_read_handler(struct mei_device *dev,
}
ext_hdr_end = 1;
+ hdr_size_left = mei_hdr->length;
if (mei_hdr->extended) {
if (!dev->rd_msg_hdr[1]) {
@@ -363,8 +371,21 @@ int mei_irq_read_handler(struct mei_device *dev,
dev_dbg(dev->dev, "extended header is %08x\n",
ext_meta_hdr_u32);
}
- meta_hdr = ((struct mei_ext_meta_hdr *)
- dev->rd_msg_hdr + 1);
+ meta_hdr = ((struct mei_ext_meta_hdr *)dev->rd_msg_hdr + 1);
+ if (check_add_overflow((u32)sizeof(*meta_hdr),
+ mei_slots2data(meta_hdr->size),
+ &hdr_size_ext)) {
+ dev_err(dev->dev, "extended message size too big %d\n",
+ meta_hdr->size);
+ return -EBADMSG;
+ }
+ if (hdr_size_left < hdr_size_ext) {
+ dev_err(dev->dev, "corrupted message header len %d\n",
+ mei_hdr->length);
+ return -EBADMSG;
+ }
+ hdr_size_left -= hdr_size_ext;
+
ext_hdr_end = meta_hdr->size + 2;
for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) {
dev->rd_msg_hdr[i] = mei_read_hdr(dev);
@@ -376,6 +397,12 @@ int mei_irq_read_handler(struct mei_device *dev,
}
if (mei_hdr->dma_ring) {
+ if (hdr_size_left != sizeof(dev->rd_msg_hdr[ext_hdr_end])) {
+ dev_err(dev->dev, "corrupted message header len %d\n",
+ mei_hdr->length);
+ return -EBADMSG;
+ }
+
dev->rd_msg_hdr[ext_hdr_end] = mei_read_hdr(dev);
dev->rd_msg_hdr_count++;
(*slots)--;
@@ -520,6 +547,16 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
if (ret)
return ret;
break;
+ case MEI_FOP_DMA_MAP:
+ ret = mei_cl_irq_dma_map(cl, cb, cmpl_list);
+ if (ret)
+ return ret;
+ break;
+ case MEI_FOP_DMA_UNMAP:
+ ret = mei_cl_irq_dma_unmap(cl, cb, cmpl_list);
+ if (ret)
+ return ret;
+ break;
default:
BUG();
}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 9f6682033ed7..28937b6e7e0c 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1026,7 +1026,7 @@ static ssize_t tx_queue_limit_show(struct device *device,
size = dev->tx_queue_limit;
mutex_unlock(&dev->device_lock);
- return snprintf(buf, PAGE_SIZE, "%u\n", size);
+ return sysfs_emit(buf, "%u\n", size);
}
static ssize_t tx_queue_limit_store(struct device *device,
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 8c395bfdf6f3..b7b6ef344e80 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -57,6 +57,7 @@ enum mei_dev_state {
MEI_DEV_ENABLED,
MEI_DEV_RESETTING,
MEI_DEV_DISABLED,
+ MEI_DEV_POWERING_DOWN,
MEI_DEV_POWER_DOWN,
MEI_DEV_POWER_UP
};
@@ -78,6 +79,8 @@ enum mei_file_transaction_states {
* @MEI_FOP_DISCONNECT_RSP: disconnect response
* @MEI_FOP_NOTIFY_START: start notification
* @MEI_FOP_NOTIFY_STOP: stop notification
+ * @MEI_FOP_DMA_MAP: request client dma map
+ * @MEI_FOP_DMA_UNMAP: request client dma unmap
*/
enum mei_cb_file_ops {
MEI_FOP_READ = 0,
@@ -87,6 +90,8 @@ enum mei_cb_file_ops {
MEI_FOP_DISCONNECT_RSP,
MEI_FOP_NOTIFY_START,
MEI_FOP_NOTIFY_STOP,
+ MEI_FOP_DMA_MAP,
+ MEI_FOP_DMA_UNMAP,
};
/**
@@ -112,6 +117,13 @@ struct mei_msg_data {
unsigned char *data;
};
+struct mei_dma_data {
+ u8 buffer_id;
+ void *vaddr;
+ dma_addr_t daddr;
+ size_t size;
+};
+
/**
* struct mei_dma_dscr - dma address descriptor
*
@@ -235,6 +247,8 @@ struct mei_cl_vtag {
* @rd_pending: pending read credits
* @rd_completed_lock: protects rd_completed queue
* @rd_completed: completed read
+ * @dma: dma settings
+ * @dma_mapped: dma buffer is currently mapped.
*
* @cldev: device on the mei client bus
*/
@@ -262,6 +276,8 @@ struct mei_cl {
struct list_head rd_pending;
spinlock_t rd_completed_lock; /* protects rd_completed queue */
struct list_head rd_completed;
+ struct mei_dma_data dma;
+ u8 dma_mapped;
struct mei_cl_device *cldev;
};
@@ -450,6 +466,7 @@ struct mei_fw_version {
* @hbm_f_dr_supported : hbm feature dma ring supported
* @hbm_f_vt_supported : hbm feature vtag supported
* @hbm_f_cap_supported : hbm feature capabilities message supported
+ * @hbm_f_cd_supported : hbm feature client dma supported
*
* @fw_ver : FW versions
*
@@ -537,6 +554,7 @@ struct mei_device {
unsigned int hbm_f_dr_supported:1;
unsigned int hbm_f_vt_supported:1;
unsigned int hbm_f_cap_supported:1;
+ unsigned int hbm_f_cd_supported:1;
struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS];
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 1de9ef7a272b..a7e179626b63 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -107,6 +107,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_EBG, MEI_ME_PCH15_SPS_CFG)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
deleted file mode 100644
index 7236ae527b19..000000000000
--- a/drivers/misc/pti.c
+++ /dev/null
@@ -1,978 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * pti.c - PTI driver for cJTAG data extraction
- *
- * Copyright (C) Intel 2010
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * The PTI (Parallel Trace Interface) driver directs trace data routed from
- * various parts in the system out through the Intel Penwell PTI port and
- * out of the mobile device for analysis with a debugging tool
- * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
- * compact JTAG, standard.
- */
-
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/console.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/pci.h>
-#include <linux/mutex.h>
-#include <linux/miscdevice.h>
-#include <linux/intel-pti.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#define DRIVERNAME "pti"
-#define PCINAME "pciPTI"
-#define TTYNAME "ttyPTI"
-#define CHARNAME "pti"
-#define PTITTY_MINOR_START 0
-#define PTITTY_MINOR_NUM 2
-#define MAX_APP_IDS 16 /* 128 channel ids / u8 bit size */
-#define MAX_OS_IDS 16 /* 128 channel ids / u8 bit size */
-#define MAX_MODEM_IDS 16 /* 128 channel ids / u8 bit size */
-#define MODEM_BASE_ID 71 /* modem master ID address */
-#define CONTROL_ID 72 /* control master ID address */
-#define CONSOLE_ID 73 /* console master ID address */
-#define OS_BASE_ID 74 /* base OS master ID address */
-#define APP_BASE_ID 80 /* base App master ID address */
-#define CONTROL_FRAME_LEN 32 /* PTI control frame maximum size */
-#define USER_COPY_SIZE 8192 /* 8Kb buffer for user space copy */
-#define APERTURE_14 0x3800000 /* offset to first OS write addr */
-#define APERTURE_LEN 0x400000 /* address length */
-
-struct pti_tty {
- struct pti_masterchannel *mc;
-};
-
-struct pti_dev {
- struct tty_port port[PTITTY_MINOR_NUM];
- unsigned long pti_addr;
- unsigned long aperture_base;
- void __iomem *pti_ioaddr;
- u8 ia_app[MAX_APP_IDS];
- u8 ia_os[MAX_OS_IDS];
- u8 ia_modem[MAX_MODEM_IDS];
-};
-
-/*
- * This protects access to ia_app, ia_os, and ia_modem,
- * which keeps track of channels allocated in
- * an aperture write id.
- */
-static DEFINE_MUTEX(alloclock);
-
-static const struct pci_device_id pci_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x82B)},
- {0}
-};
-
-static struct tty_driver *pti_tty_driver;
-static struct pti_dev *drv_data;
-
-static unsigned int pti_console_channel;
-static unsigned int pti_control_channel;
-
-/**
- * pti_write_to_aperture()- The private write function to PTI HW.
- *
- * @mc: The 'aperture'. It's part of a write address that holds
- * a master and channel ID.
- * @buf: Data being written to the HW that will ultimately be seen
- * in a debugging tool (Fido, Lauterbach).
- * @len: Size of buffer.
- *
- * Since each aperture is specified by a unique
- * master/channel ID, no two processes will be writing
- * to the same aperture at the same time so no lock is required. The
- * PTI-Output agent will send these out in the order that they arrived, and
- * thus, it will intermix these messages. The debug tool can then later
- * regroup the appropriate message segments together reconstituting each
- * message.
- */
-static void pti_write_to_aperture(struct pti_masterchannel *mc,
- u8 *buf,
- int len)
-{
- int dwordcnt;
- int final;
- int i;
- u32 ptiword;
- u32 __iomem *aperture;
- u8 *p = buf;
-
- /*
- * calculate the aperture offset from the base using the master and
- * channel id's.
- */
- aperture = drv_data->pti_ioaddr + (mc->master << 15)
- + (mc->channel << 8);
-
- dwordcnt = len >> 2;
- final = len - (dwordcnt << 2); /* final = trailing bytes */
- if (final == 0 && dwordcnt != 0) { /* always need a final dword */
- final += 4;
- dwordcnt--;
- }
-
- for (i = 0; i < dwordcnt; i++) {
- ptiword = be32_to_cpu(*(u32 *)p);
- p += 4;
- iowrite32(ptiword, aperture);
- }
-
- aperture += PTI_LASTDWORD_DTS; /* adding DTS signals that is EOM */
-
- ptiword = 0;
- for (i = 0; i < final; i++)
- ptiword |= *p++ << (24-(8*i));
-
- iowrite32(ptiword, aperture);
- return;
-}
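For reference, the arithmetic in this removed write path: the aperture address is derived from the master/channel pair, and the payload splits into whole dwords plus a final (possibly partial) dword written with the DTS end-of-message marker:

	/* aperture = pti_ioaddr + (master << 15) + (channel << 8)
	 *
	 * len = 10: dwordcnt = 2, final = 2 (two full dwords, 2-byte tail)
	 * len =  8: dwordcnt = 2, final = 0 -> adjusted to dwordcnt = 1,
	 *           final = 4, so the DTS-marked write always carries data
	 */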
-
-/**
- * pti_control_frame_built_and_sent()- control frame build and send function.
- *
- * @mc: The master / channel structure on which the function
- * built a control frame.
- * @thread_name: The thread name associated with the master / channel or
- * 'NULL' if using the 'current' global variable.
- *
- * To be able to post process the PTI contents on host side, a control frame
- * is added before sending any PTI content. So the host side knows on
- * each PTI frame the name of the thread using a dedicated master / channel.
- * The thread name is retrieved from 'current' global variable if 'thread_name'
- * is 'NULL', else it is retrieved from 'thread_name' parameter.
- * This function builds this frame and sends it to a master ID CONTROL_ID.
- * The overhead is only 32 bytes since the driver only writes to HW
- * in 32 byte chunks.
- */
-static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
- const char *thread_name)
-{
- /*
- * Since we access the comm member in current's task_struct, we only
- * need to be as large as what 'comm' in that structure is.
- */
- char comm[TASK_COMM_LEN];
- struct pti_masterchannel mccontrol = {.master = CONTROL_ID,
- .channel = 0};
- const char *thread_name_p;
- const char *control_format = "%3d %3d %s";
- u8 control_frame[CONTROL_FRAME_LEN];
-
- if (!thread_name) {
- if (!in_interrupt())
- get_task_comm(comm, current);
- else
- strncpy(comm, "Interrupt", TASK_COMM_LEN);
-
- /* Absolutely ensure our buffer is zero terminated. */
- comm[TASK_COMM_LEN-1] = 0;
- thread_name_p = comm;
- } else {
- thread_name_p = thread_name;
- }
-
- mccontrol.channel = pti_control_channel;
- pti_control_channel = (pti_control_channel + 1) & 0x7f;
-
- snprintf(control_frame, CONTROL_FRAME_LEN, control_format, mc->master,
- mc->channel, thread_name_p);
- pti_write_to_aperture(&mccontrol, control_frame, strlen(control_frame));
-}
-
-/**
- * pti_write_full_frame_to_aperture()- high level function to
- * write to PTI.
- *
- * @mc: The 'aperture'. It's part of a write address that holds
- * a master and channel ID.
- * @buf: Data being written to the HW that will ultimately be seen
- * in a debugging tool (Fido, Lauterbach).
- * @len: Size of buffer.
- *
- * All threads sending data (either console, user space application, ...)
- * are calling the high level function to write to PTI meaning that it is
- * possible to add a control frame before sending the content.
- */
-static void pti_write_full_frame_to_aperture(struct pti_masterchannel *mc,
- const unsigned char *buf,
- int len)
-{
- pti_control_frame_built_and_sent(mc, NULL);
- pti_write_to_aperture(mc, (u8 *)buf, len);
-}
-
-/**
- * get_id()- Allocate a master and channel ID.
- *
- * @id_array: an array of bits representing what channel
- * id's are allocated for writing.
- * @max_ids: The max amount of available write IDs to use.
- * @base_id: The starting SW channel ID, based on the Intel
- * PTI arch.
- * @thread_name: The thread name associated with the master / channel or
- * 'NULL' if using the 'current' global variable.
- *
- * Returns:
- * pti_masterchannel struct with master, channel ID address
- * 0 for error
- *
- * Each bit in the arrays ia_app and ia_os corresponds to a master and
- * channel id. The bit is one if the id is taken and 0 if free. For
- * every master there are 128 channel id's.
- */
-static struct pti_masterchannel *get_id(u8 *id_array,
- int max_ids,
- int base_id,
- const char *thread_name)
-{
- struct pti_masterchannel *mc;
- int i, j, mask;
-
- mc = kmalloc(sizeof(struct pti_masterchannel), GFP_KERNEL);
- if (mc == NULL)
- return NULL;
-
- /* look for a byte with a free bit */
- for (i = 0; i < max_ids; i++)
- if (id_array[i] != 0xff)
- break;
- if (i == max_ids) {
- kfree(mc);
- return NULL;
- }
- /* find the bit in the 128 possible channel opportunities */
- mask = 0x80;
- for (j = 0; j < 8; j++) {
- if ((id_array[i] & mask) == 0)
- break;
- mask >>= 1;
- }
-
- /* grab it */
- id_array[i] |= mask;
- mc->master = base_id;
- mc->channel = ((i & 0xf)<<3) + j;
- /* write new master Id / channel Id allocation to channel control */
- pti_control_frame_built_and_sent(mc, thread_name);
- return mc;
-}
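A worked example of the removed allocator's numbering: each byte of id_array covers eight channels scanned MSB-first, so byte i and bit position j map to channel ((i & 0xf) << 3) + j:

	/* example: id_array[1] == 0xC0 (bits 7 and 6 already taken)
	 * first free bit: j = 2 (mask 0x20)
	 * channel = ((1 & 0xf) << 3) + 2 = 10; then id_array[1] |= 0x20
	 */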
-
-/*
- * The following three functions:
- * pti_request_masterchannel(), pti_release_masterchannel()
- * and pti_writedata() are an API for other kernel drivers to
- * access PTI.
- */
-
-/**
- * pti_request_masterchannel()- Kernel API function used to allocate
- * a master, channel ID address
- * to write to PTI HW.
- *
- * @type: 0- request Application master, channel aperture ID
- * write address.
- * 1- request OS master, channel aperture ID write
- * address.
- * 2- request Modem master, channel aperture ID
- * write address.
- * Other values, error.
- * @thread_name: The thread name associated with the master / channel or
- * 'NULL' if using the 'current' global variable.
- *
- * Returns:
- * pti_masterchannel struct
- * 0 for error
- */
-struct pti_masterchannel *pti_request_masterchannel(u8 type,
- const char *thread_name)
-{
- struct pti_masterchannel *mc;
-
- mutex_lock(&alloclock);
-
- switch (type) {
-
- case 0:
- mc = get_id(drv_data->ia_app, MAX_APP_IDS,
- APP_BASE_ID, thread_name);
- break;
-
- case 1:
- mc = get_id(drv_data->ia_os, MAX_OS_IDS,
- OS_BASE_ID, thread_name);
- break;
-
- case 2:
- mc = get_id(drv_data->ia_modem, MAX_MODEM_IDS,
- MODEM_BASE_ID, thread_name);
- break;
- default:
- mc = NULL;
- }
-
- mutex_unlock(&alloclock);
- return mc;
-}
-EXPORT_SYMBOL_GPL(pti_request_masterchannel);
-
-/**
- * pti_release_masterchannel()- Kernel API function used to release
- * a master, channel ID address
- * used to write to PTI HW.
- *
- * @mc: master, channel aperture ID address to be released. This
- * will de-allocate the structure via kfree().
- */
-void pti_release_masterchannel(struct pti_masterchannel *mc)
-{
- u8 master, channel, i;
-
- mutex_lock(&alloclock);
-
- if (mc) {
- master = mc->master;
- channel = mc->channel;
-
- if (master == APP_BASE_ID) {
- i = channel >> 3;
- drv_data->ia_app[i] &= ~(0x80>>(channel & 0x7));
- } else if (master == OS_BASE_ID) {
- i = channel >> 3;
- drv_data->ia_os[i] &= ~(0x80>>(channel & 0x7));
- } else {
- i = channel >> 3;
- drv_data->ia_modem[i] &= ~(0x80>>(channel & 0x7));
- }
-
- kfree(mc);
- }
-
- mutex_unlock(&alloclock);
-}
-EXPORT_SYMBOL_GPL(pti_release_masterchannel);
-
-/**
- * pti_writedata()- Kernel API function used to write trace
- * debugging data to PTI HW.
- *
- * @mc: Master, channel aperture ID address to write to.
- * Null value will return with no write occurring.
- * @buf: Trace debugging data to write to the PTI HW.
- * Null value will return with no write occurring.
- * @count: Size of buf. Value of 0 or a negative number will
- * return with no write occurring.
- */
-void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count)
-{
- /*
- * since this function is exported, this is treated like an
- * API function, thus, all parameters should
- * be checked for validity.
- */
- if ((mc != NULL) && (buf != NULL) && (count > 0))
- pti_write_to_aperture(mc, buf, count);
- return;
-}
-EXPORT_SYMBOL_GPL(pti_writedata);
-
-/*
- * for the tty_driver_*() basic function descriptions, see tty_driver.h.
- * Specific header comments made for PTI-related specifics.
- */
-
-/**
- * pti_tty_driver_open()- Open an Application master, channel aperture
- * ID to the PTI device via tty device.
- *
- * @tty: tty interface.
- * @filp: filp interface passed to tty_port_open() call.
- *
- * Returns:
- * int, 0 for success
- * otherwise, fail value
- *
- * The main purpose of using the tty device interface is for
- * each tty port to have a unique PTI write aperture. In an
- * example use case, ttyPTI0 gets syslogd and an APP aperture
- * ID and ttyPTI1 is where the n_tracesink ldisc hooks to route
- * modem messages into PTI. Modem trace data does not have to
- * go to ttyPTI1, but ttyPTI0 and ttyPTI1 do need to be distinct
- * master IDs. These messages go through the PTI HW and out of
- * the handheld platform and to the Fido/Lauterbach device.
- */
-static int pti_tty_driver_open(struct tty_struct *tty, struct file *filp)
-{
- /*
- * we actually want to allocate a new channel per open, per
- * system arch. HW gives more than plenty channels for a single
- * system task to have its own channel to write trace data. This
- * also removes a locking requirement for the actual write
- * procedure.
- */
- return tty_port_open(tty->port, tty, filp);
-}
-
-/**
- * pti_tty_driver_close()- close tty device and release Application
- * master, channel aperture ID to the PTI device via tty device.
- *
- * @tty: tty interface.
- * @filp: filp interface passed to tty_port_close() call.
- *
- * The main purpose of using the tty device interface is to route
- * syslog daemon messages to the PTI HW and out of the handheld platform
- * and to the Fido/Lauterbach device.
- */
-static void pti_tty_driver_close(struct tty_struct *tty, struct file *filp)
-{
- tty_port_close(tty->port, tty, filp);
-}
-
-/**
- * pti_tty_install()- Used to set up specific master-channels
- * to tty ports for organizational purposes when
- * tracing is viewed from debugging tools.
- *
- * @driver: tty driver information.
- * @tty: tty struct containing pti information.
- *
- * Returns:
- * 0 for success
- * otherwise, error
- */
-static int pti_tty_install(struct tty_driver *driver, struct tty_struct *tty)
-{
- int idx = tty->index;
- struct pti_tty *pti_tty_data;
- int ret = tty_standard_install(driver, tty);
-
- if (ret == 0) {
- pti_tty_data = kmalloc(sizeof(struct pti_tty), GFP_KERNEL);
- if (pti_tty_data == NULL)
- return -ENOMEM;
-
- if (idx == PTITTY_MINOR_START)
- pti_tty_data->mc = pti_request_masterchannel(0, NULL);
- else
- pti_tty_data->mc = pti_request_masterchannel(2, NULL);
-
- if (pti_tty_data->mc == NULL) {
- kfree(pti_tty_data);
- return -ENXIO;
- }
- tty->driver_data = pti_tty_data;
- }
-
- return ret;
-}
-
-/**
- * pti_tty_cleanup()- Used to de-allocate master-channel resources
- * tied to tty's of this driver.
- *
- * @tty: tty struct containing pti information.
- */
-static void pti_tty_cleanup(struct tty_struct *tty)
-{
- struct pti_tty *pti_tty_data = tty->driver_data;
- if (pti_tty_data == NULL)
- return;
- pti_release_masterchannel(pti_tty_data->mc);
- kfree(pti_tty_data);
- tty->driver_data = NULL;
-}
-
-/**
- * pti_tty_driver_write()- Write trace debugging data through the tty
- * interface to the PTI HW.
- *
- * @tty: tty struct containing pti information.
- * @buf: trace data to be written.
- * @len: # of bytes to write.
- *
- * Returns:
- * int, # of bytes written
- * otherwise, error
- */
-static int pti_tty_driver_write(struct tty_struct *tty,
- const unsigned char *buf, int len)
-{
- struct pti_tty *pti_tty_data = tty->driver_data;
- if ((pti_tty_data != NULL) && (pti_tty_data->mc != NULL)) {
- pti_write_to_aperture(pti_tty_data->mc, (u8 *)buf, len);
- return len;
- }
- /*
- * we can't write to the pti hardware if the private driver_data
- * or the mc address is not there.
- */
- else
- return -EFAULT;
-}
-
-/**
- * pti_tty_write_room()- Always returns 2048.
- *
- * @tty: contains tty info of the pti driver.
- */
-static int pti_tty_write_room(struct tty_struct *tty)
-{
- return 2048;
-}
-
-/**
- * pti_char_open()- Open an Application master, channel aperture
- * ID to the PTI device. Part of the misc device implementation.
- *
- * @inode: not used.
- * @filp: Output- will have a masterchannel struct set containing
- * the allocated application PTI aperture write address.
- *
- * Returns:
- * int, 0 for success
- * otherwise, a fail value
- */
-static int pti_char_open(struct inode *inode, struct file *filp)
-{
- struct pti_masterchannel *mc;
-
- /*
- * We really do want to fail immediately if
- * pti_request_masterchannel() fails,
- * before assigning the value to filp->private_data.
- * Slightly easier to debug if this driver needs debugging.
- */
- mc = pti_request_masterchannel(0, NULL);
- if (mc == NULL)
- return -ENOMEM;
- filp->private_data = mc;
- return 0;
-}
-
-/**
- * pti_char_release()- Close a char channel to the PTI device. Part
- * of the misc device implementation.
- *
- * @inode: Not used in this implementation.
- * @filp: Contains private_data that contains the master, channel
- * ID to be released by the PTI device.
- *
- * Returns:
- * always 0
- */
-static int pti_char_release(struct inode *inode, struct file *filp)
-{
- pti_release_masterchannel(filp->private_data);
- filp->private_data = NULL;
- return 0;
-}
-
-/**
- * pti_char_write()- Write trace debugging data through the char
- * interface to the PTI HW. Part of the misc device implementation.
- *
- * @filp: Contains private data which is used to obtain
- * master, channel write ID.
- * @data: trace data to be written.
- * @len: # of bytes to write.
- * @ppose: Not used in this function implementation.
- *
- * Returns:
- * int, # of bytes written
- * otherwise, error value
- *
- * Notes: From side discussions with Alan Cox and experimenting
- * with PTI debug HW like Nokia's Fido box and Lauterbach
- * devices, 8192 byte write buffer used by USER_COPY_SIZE was
- * deemed an appropriate size for this type of usage with
- * debugging HW.
- */
-static ssize_t pti_char_write(struct file *filp, const char __user *data,
- size_t len, loff_t *ppose)
-{
- struct pti_masterchannel *mc;
- void *kbuf;
- const char __user *tmp;
- size_t size = USER_COPY_SIZE;
- size_t n = 0;
-
- tmp = data;
- mc = filp->private_data;
-
- kbuf = kmalloc(size, GFP_KERNEL);
- if (kbuf == NULL) {
- pr_err("%s(%d): buf allocation failed\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
-
- do {
- if (len - n > USER_COPY_SIZE)
- size = USER_COPY_SIZE;
- else
- size = len - n;
-
- if (copy_from_user(kbuf, tmp, size)) {
- kfree(kbuf);
- return n ? n : -EFAULT;
- }
-
- pti_write_to_aperture(mc, kbuf, size);
- n += size;
- tmp += size;
-
- } while (len > n);
-
- kfree(kbuf);
- return len;
-}
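
The write path above bounds each copy_from_user() to USER_COPY_SIZE, so an
arbitrarily large user write never needs more than one 8192-byte kernel
buffer. A minimal user-space sketch of the same chunking logic, with a
hypothetical process_chunk() standing in for pti_write_to_aperture():

#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

#define COPY_CHUNK 8192 /* mirrors USER_COPY_SIZE */

/* Hypothetical consumer standing in for pti_write_to_aperture(). */
static void process_chunk(const unsigned char *buf, size_t size)
{
        (void)buf;
        (void)size;
}

/* Copy 'len' bytes from 'src' in bounded chunks, as pti_char_write() does. */
static ssize_t chunked_write(const unsigned char *src, size_t len)
{
        unsigned char *kbuf = malloc(COPY_CHUNK);
        size_t n = 0;

        if (!kbuf)
                return -1; /* the driver returns -ENOMEM here */

        while (n < len) {
                size_t size = len - n > COPY_CHUNK ? COPY_CHUNK : len - n;

                memcpy(kbuf, src + n, size); /* copy_from_user() in the driver */
                process_chunk(kbuf, size);
                n += size;
        }

        free(kbuf);
        return (ssize_t)len;
}

int main(void)
{
        static unsigned char data[3 * COPY_CHUNK + 17];

        return chunked_write(data, sizeof(data)) == sizeof(data) ? 0 : 1;
}
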
-
-static const struct tty_operations pti_tty_driver_ops = {
- .open = pti_tty_driver_open,
- .close = pti_tty_driver_close,
- .write = pti_tty_driver_write,
- .write_room = pti_tty_write_room,
- .install = pti_tty_install,
- .cleanup = pti_tty_cleanup
-};
-
-static const struct file_operations pti_char_driver_ops = {
- .owner = THIS_MODULE,
- .write = pti_char_write,
- .open = pti_char_open,
- .release = pti_char_release,
-};
-
-static struct miscdevice pti_char_driver = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = CHARNAME,
- .fops = &pti_char_driver_ops
-};
-
-/**
- * pti_console_write()- Write to the console that has been acquired.
- *
- * @c: Not used in this implementation.
- * @buf: Data to be written.
- * @len: Length of buf.
- */
-static void pti_console_write(struct console *c, const char *buf, unsigned len)
-{
- static struct pti_masterchannel mc = {.master = CONSOLE_ID,
- .channel = 0};
-
- mc.channel = pti_console_channel;
- pti_console_channel = (pti_console_channel + 1) & 0x7f;
-
- pti_write_full_frame_to_aperture(&mc, buf, len);
-}
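
pti_console_write() walks the console master through channels 0-127, wrapping
with the 0x7f mask so successive writes land on successive channels. A small
stand-alone sketch of that wrap-around allocation, assuming only the
128-channels-per-master limit carried over from the driver:

#include <stdio.h>

#define CHANNEL_MASK 0x7f /* 128 channels per master, as in the driver */

static unsigned int next_channel;

/* Hand out channel IDs 0..127, wrapping exactly as pti_console_write() does. */
static unsigned int get_console_channel(void)
{
        unsigned int ch = next_channel;

        next_channel = (next_channel + 1) & CHANNEL_MASK;
        return ch;
}

int main(void)
{
        for (int i = 0; i < 130; i++)
                printf("%u ", get_console_channel()); /* 0..127, then 0, 1 */
        printf("\n");
        return 0;
}
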
-
-/**
- * pti_console_device()- Return the driver tty structure and set the
- *		associated index.
- *
- * @c: Console device of the driver.
- * @index: index associated with c.
- *
- * Returns:
- *	the pti_tty_driver structure, always.
- */
-static struct tty_driver *pti_console_device(struct console *c, int *index)
-{
- *index = c->index;
- return pti_tty_driver;
-}
-
-/**
- * pti_console_setup()- Initialize console variables used by the driver.
- *
- * @c: Not used.
- * @opts: Not used.
- *
- * Returns:
- * always 0.
- */
-static int pti_console_setup(struct console *c, char *opts)
-{
- pti_console_channel = 0;
- pti_control_channel = 0;
- return 0;
-}
-
-/*
- * pti_console struct, used to capture OS printk()'s and shift
- * out to the PTI device for debugging. This cannot be
- * enabled at boot because of the possibility of eating
- * any serial console printk's (a race condition was discovered).
- * The console should be enabled when the tty port is
- * used for the first time. Since the primary purpose for
- * the tty port is to hook up syslog to it, the tty port
- * will be open for a really long time.
- */
-static struct console pti_console = {
- .name = TTYNAME,
- .write = pti_console_write,
- .device = pti_console_device,
- .setup = pti_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = 0,
-};
-
-/**
- * pti_port_activate()- Used to start/initialize any items upon
- * first opening of tty_port().
- *
- * @port: The tty port number of the PTI device.
- * @tty: The tty struct associated with this device.
- *
- * Returns:
- * always returns 0
- *
- * Notes: The primary purpose of the PTI tty port 0 is to hook
- * the syslog daemon to it; thus this port will be open for a
- * very long time.
- */
-static int pti_port_activate(struct tty_port *port, struct tty_struct *tty)
-{
- if (port->tty->index == PTITTY_MINOR_START)
- console_start(&pti_console);
- return 0;
-}
-
-/**
- * pti_port_shutdown()- Used to stop/shutdown any items upon the
- * last tty port close.
- *
- * @port: The tty port number of the PTI device.
- *
- * Notes: The primary purpose of the PTI tty port 0 is to hook
- * the syslog daemon to it; thus this port will be open for a
- * very long time.
- */
-static void pti_port_shutdown(struct tty_port *port)
-{
- if (port->tty->index == PTITTY_MINOR_START)
- console_stop(&pti_console);
-}
-
-static const struct tty_port_operations tty_port_ops = {
- .activate = pti_port_activate,
- .shutdown = pti_port_shutdown,
-};
-
-/*
- * Note: the _probe() call sets everything up and ties char and tty
- * device creation to successful detection of the PTI device on the PCI bus.
- */
-
-/**
- * pti_pci_probe()- Used to detect pti on the pci bus and set
- * things up in the driver.
- *
- * @pdev: pci_dev struct values for pti.
- * @ent: pci_device_id struct for pti driver.
- *
- * Returns:
- * 0 for success
- * otherwise, error
- */
-static int pti_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- unsigned int a;
- int retval;
- int pci_bar = 1;
-
- dev_dbg(&pdev->dev, "%s %s(%d): PTI PCI ID %04x:%04x\n", __FILE__,
- __func__, __LINE__, pdev->vendor, pdev->device);
-
- retval = misc_register(&pti_char_driver);
- if (retval) {
-		pr_err("%s(%d): CHAR registration of pti driver failed\n",
- __func__, __LINE__);
- pr_err("%s(%d): Error value returned: %d\n",
- __func__, __LINE__, retval);
- goto err;
- }
-
- retval = pci_enable_device(pdev);
- if (retval != 0) {
- dev_err(&pdev->dev,
- "%s: pci_enable_device() returned error %d\n",
- __func__, retval);
- goto err_unreg_misc;
- }
-
- drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
- if (drv_data == NULL) {
- retval = -ENOMEM;
- dev_err(&pdev->dev,
-			"%s(%d): kzalloc() returned NULL memory.\n",
- __func__, __LINE__);
- goto err_disable_pci;
- }
- drv_data->pti_addr = pci_resource_start(pdev, pci_bar);
-
- retval = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
- if (retval != 0) {
- dev_err(&pdev->dev,
- "%s(%d): pci_request_region() returned error %d\n",
- __func__, __LINE__, retval);
- goto err_free_dd;
- }
- drv_data->aperture_base = drv_data->pti_addr+APERTURE_14;
- drv_data->pti_ioaddr =
- ioremap((u32)drv_data->aperture_base,
- APERTURE_LEN);
- if (!drv_data->pti_ioaddr) {
- retval = -ENOMEM;
- goto err_rel_reg;
- }
-
- pci_set_drvdata(pdev, drv_data);
-
- for (a = 0; a < PTITTY_MINOR_NUM; a++) {
- struct tty_port *port = &drv_data->port[a];
- tty_port_init(port);
- port->ops = &tty_port_ops;
-
- tty_port_register_device(port, pti_tty_driver, a, &pdev->dev);
- }
-
- register_console(&pti_console);
-
- return 0;
-err_rel_reg:
- pci_release_region(pdev, pci_bar);
-err_free_dd:
- kfree(drv_data);
-err_disable_pci:
- pci_disable_device(pdev);
-err_unreg_misc:
- misc_deregister(&pti_char_driver);
-err:
- return retval;
-}
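
The error path above is the usual kernel unwind ladder: each successfully
acquired resource gets a label, and a failure jumps to the label that releases
everything acquired so far, in reverse order. A stripped-down, compilable
sketch of the pattern; the acquire/release helpers are illustrative stand-ins
for misc_register(), pci_enable_device() and friends:

#include <stdio.h>

/* Illustrative stand-ins for misc_register(), pci_enable_device(), ... */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -1; } /* force the unwind path */
static void release_b(void) { puts("release_b"); }
static void release_a(void) { puts("release_a"); }

/* Acquire in order a, b, c; on failure, release in reverse order. */
static int example_probe(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                goto err;
        ret = acquire_b();
        if (ret)
                goto err_release_a;
        ret = acquire_c();
        if (ret)
                goto err_release_b;
        return 0;

err_release_b:
        release_b();
err_release_a:
        release_a();
err:
        return ret;
}

int main(void)
{
        return example_probe() ? 1 : 0;
}
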
-
-/**
- * pti_pci_remove()- Driver exit method to remove PTI from
- * PCI bus.
- * @pdev: variable containing pci info of PTI.
- */
-static void pti_pci_remove(struct pci_dev *pdev)
-{
- struct pti_dev *drv_data = pci_get_drvdata(pdev);
- unsigned int a;
-
- unregister_console(&pti_console);
-
- for (a = 0; a < PTITTY_MINOR_NUM; a++) {
- tty_unregister_device(pti_tty_driver, a);
- tty_port_destroy(&drv_data->port[a]);
- }
-
- iounmap(drv_data->pti_ioaddr);
- kfree(drv_data);
- pci_release_region(pdev, 1);
- pci_disable_device(pdev);
-
- misc_deregister(&pti_char_driver);
-}
-
-static struct pci_driver pti_pci_driver = {
- .name = PCINAME,
- .id_table = pci_ids,
- .probe = pti_pci_probe,
- .remove = pti_pci_remove,
-};
-
-/**
- * pti_init()- Overall entry/init call to the pti driver.
- * It starts the registration process with the kernel.
- *
- * Returns:
- * int __init, 0 for success
- *	otherwise, an error value
- *
- */
-static int __init pti_init(void)
-{
- int retval;
-
- /* First register module as tty device */
-
- pti_tty_driver = alloc_tty_driver(PTITTY_MINOR_NUM);
- if (pti_tty_driver == NULL) {
- pr_err("%s(%d): Memory allocation failed for ptiTTY driver\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
-
- pti_tty_driver->driver_name = DRIVERNAME;
- pti_tty_driver->name = TTYNAME;
- pti_tty_driver->major = 0;
- pti_tty_driver->minor_start = PTITTY_MINOR_START;
- pti_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
- pti_tty_driver->subtype = SYSTEM_TYPE_SYSCONS;
- pti_tty_driver->flags = TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV;
- pti_tty_driver->init_termios = tty_std_termios;
-
- tty_set_operations(pti_tty_driver, &pti_tty_driver_ops);
-
- retval = tty_register_driver(pti_tty_driver);
- if (retval) {
-		pr_err("%s(%d): TTY registration of pti driver failed\n",
- __func__, __LINE__);
- pr_err("%s(%d): Error value returned: %d\n",
- __func__, __LINE__, retval);
-
- goto put_tty;
- }
-
- retval = pci_register_driver(&pti_pci_driver);
- if (retval) {
-		pr_err("%s(%d): PCI registration of pti driver failed\n",
- __func__, __LINE__);
- pr_err("%s(%d): Error value returned: %d\n",
- __func__, __LINE__, retval);
- goto unreg_tty;
- }
-
- return 0;
-unreg_tty:
- tty_unregister_driver(pti_tty_driver);
-put_tty:
- put_tty_driver(pti_tty_driver);
- pti_tty_driver = NULL;
- return retval;
-}
-
-/**
- * pti_exit()- Unregisters this module as a tty and pci driver.
- */
-static void __exit pti_exit(void)
-{
- tty_unregister_driver(pti_tty_driver);
- pci_unregister_driver(&pti_pci_driver);
- put_tty_driver(pti_tty_driver);
-}
-
-module_init(pti_init);
-module_exit(pti_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ken Mills, Jay Freyensee");
-MODULE_DESCRIPTION("PTI Driver");
-
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 41cab297d66e..9f350e05ef68 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -19,6 +19,47 @@
#include <uapi/misc/pvpanic.h>
static void __iomem *base;
+static unsigned int capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
+static unsigned int events;
+
+static ssize_t capability_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%x\n", capability);
+}
+static DEVICE_ATTR_RO(capability);
+
+static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%x\n", events);
+}
+
+static ssize_t events_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int tmp;
+ int err;
+
+ err = kstrtouint(buf, 16, &tmp);
+ if (err)
+ return err;
+
+ if ((tmp & capability) != tmp)
+ return -EINVAL;
+
+ events = tmp;
+
+ return count;
+}
+static DEVICE_ATTR_RW(events);
+
+static struct attribute *pvpanic_dev_attrs[] = {
+ &dev_attr_capability.attr,
+ &dev_attr_events.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(pvpanic_dev);
MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
MODULE_DESCRIPTION("pvpanic device driver");
@@ -27,7 +68,8 @@ MODULE_LICENSE("GPL");
static void
pvpanic_send_event(unsigned int event)
{
- iowrite8(event, base);
+ if (event & capability & events)
+ iowrite8(event, base);
}
static int
@@ -73,8 +115,13 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
return -EINVAL;
}
- atomic_notifier_chain_register(&panic_notifier_list,
- &pvpanic_panic_nb);
+	/* initialize capability by RDPT */
+ capability &= ioread8(base);
+ events = capability;
+
+ if (capability)
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &pvpanic_panic_nb);
return 0;
}
@@ -82,8 +129,9 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
static int pvpanic_mmio_remove(struct platform_device *pdev)
{
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &pvpanic_panic_nb);
+ if (capability)
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &pvpanic_panic_nb);
return 0;
}
@@ -104,6 +152,7 @@ static struct platform_driver pvpanic_mmio_driver = {
.name = "pvpanic-mmio",
.of_match_table = pvpanic_mmio_match,
.acpi_match_table = pvpanic_device_ids,
+ .dev_groups = pvpanic_dev_groups,
},
.probe = pvpanic_mmio_probe,
.remove = pvpanic_mmio_remove,
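
With this change an event reaches the device only when its bit is set in both
the hardware capability mask and the user-selected events mask. A user-space
sketch of that gating, reusing the bit values from uapi/misc/pvpanic.h:

#include <stdio.h>

#define PVPANIC_PANICKED     (1 << 0) /* bit values from uapi/misc/pvpanic.h */
#define PVPANIC_CRASH_LOADED (1 << 1)

static unsigned int capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
static unsigned int events;

/* Mirrors events_store(): reject any mask not covered by the capability. */
static int set_events(unsigned int tmp)
{
        if ((tmp & capability) != tmp)
                return -1; /* -EINVAL in the driver */
        events = tmp;
        return 0;
}

/* Mirrors pvpanic_send_event(): write only if all three masks agree. */
static void send_event(unsigned int event)
{
        if (event & capability & events)
                printf("iowrite8(0x%x)\n", event);
}

int main(void)
{
        set_events(PVPANIC_PANICKED);     /* user enables panic events only */
        send_event(PVPANIC_PANICKED);     /* forwarded to the device */
        send_event(PVPANIC_CRASH_LOADED); /* silently dropped */
        return 0;
}
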
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 23837d0d6f4a..2508f83bdc3f 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -208,7 +208,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
} else {
dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1));
dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
- "xp_remote_memcpy(0x%p, 0x%p, %hu)\n", dst,
+ "xp_remote_memcpy(0x%p, 0x%p, %u)\n", dst,
(void *)msg->buf_pa, msg->size);
ret = xp_remote_memcpy(xp_pa(dst), msg->buf_pa, msg->size);
@@ -218,7 +218,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
* !!! appears in_use and we can't just call
* !!! dev_kfree_skb.
*/
- dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) "
+ dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%x) "
"returned error=0x%x\n", dst,
(void *)msg->buf_pa, msg->size, ret);
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index c49065887e8f..880c33ab9f47 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -237,7 +237,9 @@ static struct qp_list qp_guest_endpoints = {
#define QPE_NUM_PAGES(_QPE) ((u32) \
(DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
-
+#define QP_SIZES_ARE_VALID(_prod_qsize, _cons_qsize) \
+ ((_prod_qsize) + (_cons_qsize) >= max(_prod_qsize, _cons_qsize) && \
+ (_prod_qsize) + (_cons_qsize) <= VMCI_MAX_GUEST_QP_MEMORY)
/*
* Frees kernel VA space for a given queue and its queue header, and
@@ -528,7 +530,7 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
u64 num_pages;
const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
- if (size > SIZE_MAX - PAGE_SIZE)
+ if (size > min_t(size_t, VMCI_MAX_GUEST_QP_MEMORY, SIZE_MAX - PAGE_SIZE))
return NULL;
num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
if (num_pages > (SIZE_MAX - queue_size) /
@@ -537,6 +539,9 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
+ if (queue_size + queue_page_size > KMALLOC_MAX_SIZE)
+ return NULL;
+
queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
if (queue) {
queue->q_header = NULL;
@@ -630,7 +635,7 @@ static void qp_release_pages(struct page **pages,
for (i = 0; i < num_pages; i++) {
if (dirty)
- set_page_dirty(pages[i]);
+ set_page_dirty_lock(pages[i]);
put_page(pages[i]);
pages[i] = NULL;
@@ -1207,7 +1212,7 @@ static int qp_alloc_guest_work(struct vmci_handle *handle,
} else {
result = qp_alloc_hypercall(queue_pair_entry);
if (result < VMCI_SUCCESS) {
- pr_warn("qp_alloc_hypercall result = %d\n", result);
+ pr_devel("qp_alloc_hypercall result = %d\n", result);
goto error;
}
}
@@ -1929,6 +1934,9 @@ int vmci_qp_broker_alloc(struct vmci_handle handle,
struct vmci_qp_page_store *page_store,
struct vmci_ctx *context)
{
+ if (!QP_SIZES_ARE_VALID(produce_size, consume_size))
+ return VMCI_ERROR_NO_RESOURCES;
+
return qp_broker_alloc(handle, peer, flags, priv_flags,
produce_size, consume_size,
page_store, context, NULL, NULL, NULL, NULL);
@@ -2685,8 +2693,7 @@ int vmci_qpair_alloc(struct vmci_qp **qpair,
* used by the device is NO_RESOURCES, so use that here too.
*/
- if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
- produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
+ if (!QP_SIZES_ARE_VALID(produce_qsize, consume_qsize))
return VMCI_ERROR_NO_RESOURCES;
retval = vmci_route(&src, &dst, false, &route);
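
QP_SIZES_ARE_VALID() folds two checks into one expression: with unsigned
arithmetic, a + b can only be smaller than max(a, b) if the addition wrapped,
so the first clause rejects overflow while the second enforces the
VMCI_MAX_GUEST_QP_MEMORY ceiling. A stand-alone sketch, with an illustrative
limit value:

#include <stdint.h>
#include <stdio.h>

#define MAX_QP_MEMORY (128ULL * 1024 * 1024) /* illustrative ceiling */

/* Wrap-safe: if prod + cons overflows, the sum is smaller than both inputs. */
static int qp_sizes_are_valid(uint64_t prod, uint64_t cons)
{
        uint64_t max = prod > cons ? prod : cons;

        return prod + cons >= max && prod + cons <= MAX_QP_MEMORY;
}

int main(void)
{
        printf("%d\n", qp_sizes_are_valid(4096, 4096));       /* 1: within limit */
        printf("%d\n", qp_sizes_are_valid(UINT64_MAX, 2));    /* 0: wrapped */
        printf("%d\n", qp_sizes_are_valid(MAX_QP_MEMORY, 1)); /* 0: too big */
        return 0;
}
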
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
index 00017fc29a52..c4e6e924d9be 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.h
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -104,7 +104,7 @@ struct vmci_qp_dtch_info {
struct vmci_qp_page_store {
/* Reference to pages backing the queue pair. */
u64 pages;
- /* Length of pageList/virtual addres range (in pages). */
+ /* Length of pageList/virtual address range (in pages). */
u32 len;
};