From a0f5a630668cb8b2ebf5204f08e957875e991780 Mon Sep 17 00:00:00 2001
From: Manivannan Sadhasivam
Date: Tue, 1 Mar 2022 21:33:02 +0530
Subject: bus: mhi: Move host MHI code to "host" directory

In preparation for the endpoint MHI support, let's move the host MHI code
to its own "host" directory and adjust the toplevel MHI Kconfig & Makefile.

While at it, let's also move the "pci_generic" driver to the "host"
directory as it is a host MHI controller driver.

Reviewed-by: Hemant Kumar
Reviewed-by: Alex Elder
Signed-off-by: Manivannan Sadhasivam
Link: https://lore.kernel.org/r/20220301160308.107452-5-manivannan.sadhasivam@linaro.org
Signed-off-by: Greg Kroah-Hartman
---
 drivers/bus/Makefile               |    2 +-
 drivers/bus/mhi/Kconfig            |   27 +-
 drivers/bus/mhi/Makefile           |    8 +-
 drivers/bus/mhi/core/Makefile      |    4 -
 drivers/bus/mhi/core/boot.c        |  533 ------
 drivers/bus/mhi/core/debugfs.c     |  413 ---------
 drivers/bus/mhi/core/init.c        | 1431 ------------------------------
 drivers/bus/mhi/core/internal.h    |  723 ----------------
 drivers/bus/mhi/core/main.c        | 1687 ------------------------------------
 drivers/bus/mhi/core/pm.c          | 1260 ---------------------------
 drivers/bus/mhi/host/Kconfig       |   31 +
 drivers/bus/mhi/host/Makefile      |    6 +
 drivers/bus/mhi/host/boot.c        |  533 ++++++++++++
 drivers/bus/mhi/host/debugfs.c     |  413 +++++++++
 drivers/bus/mhi/host/init.c        | 1431 ++++++++++++++++++++++++++++++
 drivers/bus/mhi/host/internal.h    |  723 ++++++++++++++++
 drivers/bus/mhi/host/main.c        | 1687 ++++++++++++++++++++++++++++++++++++
 drivers/bus/mhi/host/pci_generic.c | 1105 +++++++++++++++++++++++
 drivers/bus/mhi/host/pm.c          | 1260 +++++++++++++++++++++++++++
 drivers/bus/mhi/pci_generic.c      | 1105 -----------------------
 20 files changed, 7194 insertions(+), 7188 deletions(-)
 delete mode 100644 drivers/bus/mhi/core/Makefile
 delete mode 100644 drivers/bus/mhi/core/boot.c
 delete mode 100644 drivers/bus/mhi/core/debugfs.c
 delete mode 100644 drivers/bus/mhi/core/init.c
 delete mode 100644 drivers/bus/mhi/core/internal.h
 delete mode 100644 drivers/bus/mhi/core/main.c
 delete mode 100644 drivers/bus/mhi/core/pm.c
 create mode 100644 drivers/bus/mhi/host/Kconfig
 create mode 100644 drivers/bus/mhi/host/Makefile
 create mode 100644 drivers/bus/mhi/host/boot.c
 create mode 100644 drivers/bus/mhi/host/debugfs.c
 create mode 100644 drivers/bus/mhi/host/init.c
 create mode 100644 drivers/bus/mhi/host/internal.h
 create mode 100644 drivers/bus/mhi/host/main.c
 create mode 100644 drivers/bus/mhi/host/pci_generic.c
 create mode 100644 drivers/bus/mhi/host/pm.c
 delete mode 100644 drivers/bus/mhi/pci_generic.c

diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 52c2f35a26a9..16da51130d1a 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -39,4 +39,4 @@ obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
 obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o

 # MHI
-obj-$(CONFIG_MHI_BUS) += mhi/
+obj-y += mhi/
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig
index da5cd0c9fc62..4748df7f9cd5 100644
--- a/drivers/bus/mhi/Kconfig
+++ b/drivers/bus/mhi/Kconfig
@@ -2,30 +2,7 @@
 #
 # MHI bus
 #
-# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+# Copyright (c) 2021, Linaro Ltd.
 #

-config MHI_BUS
-	tristate "Modem Host Interface (MHI) bus"
-	help
-	  Bus driver for MHI protocol. Modem Host Interface (MHI) is a
-	  communication protocol used by the host processors to control
-	  and communicate with modem devices over a high speed peripheral
-	  bus or shared memory.
- -config MHI_BUS_DEBUG - bool "Debugfs support for the MHI bus" - depends on MHI_BUS && DEBUG_FS - help - Enable debugfs support for use with the MHI transport. Allows - reading and/or modifying some values within the MHI controller - for debug and test purposes. - -config MHI_BUS_PCI_GENERIC - tristate "MHI PCI controller driver" - depends on MHI_BUS - depends on PCI - help - This driver provides MHI PCI controller driver for devices such as - Qualcomm SDX55 based PCIe modems. - +source "drivers/bus/mhi/host/Kconfig" diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile index 0a2d778d6fb4..5f5708a249f5 100644 --- a/drivers/bus/mhi/Makefile +++ b/drivers/bus/mhi/Makefile @@ -1,6 +1,2 @@ -# core layer -obj-y += core/ - -obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o -mhi_pci_generic-y += pci_generic.o - +# Host MHI stack +obj-y += host/ diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile deleted file mode 100644 index c3feb4130aa3..000000000000 --- a/drivers/bus/mhi/core/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -obj-$(CONFIG_MHI_BUS) += mhi.o - -mhi-y := init.o main.o pm.o boot.o -mhi-$(CONFIG_MHI_BUS_DEBUG) += debugfs.o diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c deleted file mode 100644 index 74295d3cc662..000000000000 --- a/drivers/bus/mhi/core/boot.c +++ /dev/null @@ -1,533 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "internal.h" - -/* Setup RDDM vector table for RDDM transfer and program RXVEC */ -void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, - struct image_info *img_info) -{ - struct mhi_buf *mhi_buf = img_info->mhi_buf; - struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; - void __iomem *base = mhi_cntrl->bhie; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - u32 sequence_id; - unsigned int i; - - for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { - bhi_vec->dma_addr = mhi_buf->dma_addr; - bhi_vec->size = mhi_buf->len; - } - - dev_dbg(dev, "BHIe programming for RDDM\n"); - - mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS, - upper_32_bits(mhi_buf->dma_addr)); - - mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS, - lower_32_bits(mhi_buf->dma_addr)); - - mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); - sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK); - - mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, - BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, - sequence_id); - - dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n", - &mhi_buf->dma_addr, mhi_buf->len, sequence_id); -} - -/* Collect RDDM buffer during kernel panic */ -static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) -{ - int ret; - u32 rx_status; - enum mhi_ee_type ee; - const u32 delayus = 2000; - u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus; - const u32 rddm_timeout_us = 200000; - int rddm_retry = rddm_timeout_us / delayus; - void __iomem *base = mhi_cntrl->bhie; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - - dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n", - to_mhi_pm_state_str(mhi_cntrl->pm_state), - TO_MHI_STATE_STR(mhi_cntrl->dev_state), - TO_MHI_EXEC_STR(mhi_cntrl->ee)); - - /* - * This should only be executing during a kernel panic, we expect all - * other cores to shutdown while we're 
collecting RDDM buffer. After - * returning from this function, we expect the device to reset. - * - * Normaly, we read/write pm_state only after grabbing the - * pm_lock, since we're in a panic, skipping it. Also there is no - * gurantee that this state change would take effect since - * we're setting it w/o grabbing pm_lock - */ - mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; - /* update should take the effect immediately */ - smp_wmb(); - - /* - * Make sure device is not already in RDDM. In case the device asserts - * and a kernel panic follows, device will already be in RDDM. - * Do not trigger SYS ERR again and proceed with waiting for - * image download completion. - */ - ee = mhi_get_exec_env(mhi_cntrl); - if (ee == MHI_EE_MAX) - goto error_exit_rddm; - - if (ee != MHI_EE_RDDM) { - dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n"); - mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); - - dev_dbg(dev, "Waiting for device to enter RDDM\n"); - while (rddm_retry--) { - ee = mhi_get_exec_env(mhi_cntrl); - if (ee == MHI_EE_RDDM) - break; - - udelay(delayus); - } - - if (rddm_retry <= 0) { - /* Hardware reset so force device to enter RDDM */ - dev_dbg(dev, - "Did not enter RDDM, do a host req reset\n"); - mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, - MHI_SOC_RESET_REQ_OFFSET, - MHI_SOC_RESET_REQ); - udelay(delayus); - } - - ee = mhi_get_exec_env(mhi_cntrl); - } - - dev_dbg(dev, - "Waiting for RDDM image download via BHIe, current EE:%s\n", - TO_MHI_EXEC_STR(ee)); - - while (retry--) { - ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, - BHIE_RXVECSTATUS_STATUS_BMSK, - BHIE_RXVECSTATUS_STATUS_SHFT, - &rx_status); - if (ret) - return -EIO; - - if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) - return 0; - - udelay(delayus); - } - - ee = mhi_get_exec_env(mhi_cntrl); - ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status); - - dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status); - -error_exit_rddm: - dev_err(dev, "RDDM transfer failed. Current EE: %s\n", - TO_MHI_EXEC_STR(ee)); - - return -EIO; -} - -/* Download RDDM image from device */ -int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic) -{ - void __iomem *base = mhi_cntrl->bhie; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - u32 rx_status; - - if (in_panic) - return __mhi_download_rddm_in_panic(mhi_cntrl); - - dev_dbg(dev, "Waiting for RDDM image download via BHIe\n"); - - /* Wait for the image download to complete */ - wait_event_timeout(mhi_cntrl->state_event, - mhi_read_reg_field(mhi_cntrl, base, - BHIE_RXVECSTATUS_OFFS, - BHIE_RXVECSTATUS_STATUS_BMSK, - BHIE_RXVECSTATUS_STATUS_SHFT, - &rx_status) || rx_status, - msecs_to_jiffies(mhi_cntrl->timeout_ms)); - - return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO; -} -EXPORT_SYMBOL_GPL(mhi_download_rddm_image); - -static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl, - const struct mhi_buf *mhi_buf) -{ - void __iomem *base = mhi_cntrl->bhie; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - rwlock_t *pm_lock = &mhi_cntrl->pm_lock; - u32 tx_status, sequence_id; - int ret; - - read_lock_bh(pm_lock); - if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { - read_unlock_bh(pm_lock); - return -EIO; - } - - sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK); - dev_dbg(dev, "Starting image download via BHIe. 
Sequence ID: %u\n", - sequence_id); - mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS, - upper_32_bits(mhi_buf->dma_addr)); - - mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS, - lower_32_bits(mhi_buf->dma_addr)); - - mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); - - mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, - BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT, - sequence_id); - read_unlock_bh(pm_lock); - - /* Wait for the image download to complete */ - ret = wait_event_timeout(mhi_cntrl->state_event, - MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || - mhi_read_reg_field(mhi_cntrl, base, - BHIE_TXVECSTATUS_OFFS, - BHIE_TXVECSTATUS_STATUS_BMSK, - BHIE_TXVECSTATUS_STATUS_SHFT, - &tx_status) || tx_status, - msecs_to_jiffies(mhi_cntrl->timeout_ms)); - if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || - tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL) - return -EIO; - - return (!ret) ? -ETIMEDOUT : 0; -} - -static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl, - dma_addr_t dma_addr, - size_t size) -{ - u32 tx_status, val, session_id; - int i, ret; - void __iomem *base = mhi_cntrl->bhi; - rwlock_t *pm_lock = &mhi_cntrl->pm_lock; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - struct { - char *name; - u32 offset; - } error_reg[] = { - { "ERROR_CODE", BHI_ERRCODE }, - { "ERROR_DBG1", BHI_ERRDBG1 }, - { "ERROR_DBG2", BHI_ERRDBG2 }, - { "ERROR_DBG3", BHI_ERRDBG3 }, - { NULL }, - }; - - read_lock_bh(pm_lock); - if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { - read_unlock_bh(pm_lock); - goto invalid_pm_state; - } - - session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK); - dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n", - session_id); - mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0); - mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, - upper_32_bits(dma_addr)); - mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, - lower_32_bits(dma_addr)); - mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size); - mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id); - read_unlock_bh(pm_lock); - - /* Wait for the image download to complete */ - ret = wait_event_timeout(mhi_cntrl->state_event, - MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || - mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS, - BHI_STATUS_MASK, BHI_STATUS_SHIFT, - &tx_status) || tx_status, - msecs_to_jiffies(mhi_cntrl->timeout_ms)); - if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) - goto invalid_pm_state; - - if (tx_status == BHI_STATUS_ERROR) { - dev_err(dev, "Image transfer failed\n"); - read_lock_bh(pm_lock); - if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { - for (i = 0; error_reg[i].name; i++) { - ret = mhi_read_reg(mhi_cntrl, base, - error_reg[i].offset, &val); - if (ret) - break; - dev_err(dev, "Reg: %s value: 0x%x\n", - error_reg[i].name, val); - } - } - read_unlock_bh(pm_lock); - goto invalid_pm_state; - } - - return (!ret) ? 
-ETIMEDOUT : 0; - -invalid_pm_state: - - return -EIO; -} - -void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, - struct image_info *image_info) -{ - int i; - struct mhi_buf *mhi_buf = image_info->mhi_buf; - - for (i = 0; i < image_info->entries; i++, mhi_buf++) - dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, - mhi_buf->buf, mhi_buf->dma_addr); - - kfree(image_info->mhi_buf); - kfree(image_info); -} - -int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, - struct image_info **image_info, - size_t alloc_size) -{ - size_t seg_size = mhi_cntrl->seg_len; - int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1; - int i; - struct image_info *img_info; - struct mhi_buf *mhi_buf; - - img_info = kzalloc(sizeof(*img_info), GFP_KERNEL); - if (!img_info) - return -ENOMEM; - - /* Allocate memory for entries */ - img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf), - GFP_KERNEL); - if (!img_info->mhi_buf) - goto error_alloc_mhi_buf; - - /* Allocate and populate vector table */ - mhi_buf = img_info->mhi_buf; - for (i = 0; i < segments; i++, mhi_buf++) { - size_t vec_size = seg_size; - - /* Vector table is the last entry */ - if (i == segments - 1) - vec_size = sizeof(struct bhi_vec_entry) * i; - - mhi_buf->len = vec_size; - mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, - vec_size, &mhi_buf->dma_addr, - GFP_KERNEL); - if (!mhi_buf->buf) - goto error_alloc_segment; - } - - img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf; - img_info->entries = segments; - *image_info = img_info; - - return 0; - -error_alloc_segment: - for (--i, --mhi_buf; i >= 0; i--, mhi_buf--) - dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, - mhi_buf->buf, mhi_buf->dma_addr); - -error_alloc_mhi_buf: - kfree(img_info); - - return -ENOMEM; -} - -static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, - const struct firmware *firmware, - struct image_info *img_info) -{ - size_t remainder = firmware->size; - size_t to_cpy; - const u8 *buf = firmware->data; - struct mhi_buf *mhi_buf = img_info->mhi_buf; - struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; - - while (remainder) { - to_cpy = min(remainder, mhi_buf->len); - memcpy(mhi_buf->buf, buf, to_cpy); - bhi_vec->dma_addr = mhi_buf->dma_addr; - bhi_vec->size = to_cpy; - - buf += to_cpy; - remainder -= to_cpy; - bhi_vec++; - mhi_buf++; - } -} - -void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) -{ - const struct firmware *firmware = NULL; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - const char *fw_name; - void *buf; - dma_addr_t dma_addr; - size_t size; - int i, ret; - - if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { - dev_err(dev, "Device MHI is not in valid state\n"); - return; - } - - /* save hardware info from BHI */ - ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU, - &mhi_cntrl->serial_number); - if (ret) - dev_err(dev, "Could not capture serial number via BHI\n"); - - for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) { - ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), - &mhi_cntrl->oem_pk_hash[i]); - if (ret) { - dev_err(dev, "Could not capture OEM PK HASH via BHI\n"); - break; - } - } - - /* wait for ready on pass through or any other execution environment */ - if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee)) - goto fw_load_ready_state; - - fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ? 
- mhi_cntrl->edl_image : mhi_cntrl->fw_image; - - if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size || - !mhi_cntrl->seg_len))) { - dev_err(dev, - "No firmware image defined or !sbl_size || !seg_len\n"); - goto error_fw_load; - } - - ret = request_firmware(&firmware, fw_name, dev); - if (ret) { - dev_err(dev, "Error loading firmware: %d\n", ret); - goto error_fw_load; - } - - size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size; - - /* SBL size provided is maximum size, not necessarily the image size */ - if (size > firmware->size) - size = firmware->size; - - buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr, - GFP_KERNEL); - if (!buf) { - release_firmware(firmware); - goto error_fw_load; - } - - /* Download image using BHI */ - memcpy(buf, firmware->data, size); - ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size); - dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr); - - /* Error or in EDL mode, we're done */ - if (ret) { - dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret); - release_firmware(firmware); - goto error_fw_load; - } - - /* Wait for ready since EDL image was loaded */ - if (fw_name == mhi_cntrl->edl_image) { - release_firmware(firmware); - goto fw_load_ready_state; - } - - write_lock_irq(&mhi_cntrl->pm_lock); - mhi_cntrl->dev_state = MHI_STATE_RESET; - write_unlock_irq(&mhi_cntrl->pm_lock); - - /* - * If we're doing fbc, populate vector tables while - * device transitioning into MHI READY state - */ - if (mhi_cntrl->fbc_download) { - ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, - firmware->size); - if (ret) { - release_firmware(firmware); - goto error_fw_load; - } - - /* Load the firmware into BHIE vec table */ - mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image); - } - - release_firmware(firmware); - -fw_load_ready_state: - /* Transitioning into MHI RESET->READY state */ - ret = mhi_ready_state_transition(mhi_cntrl); - if (ret) { - dev_err(dev, "MHI did not enter READY state\n"); - goto error_ready_state; - } - - dev_info(dev, "Wait for device to enter SBL or Mission mode\n"); - return; - -error_ready_state: - if (mhi_cntrl->fbc_download) { - mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); - mhi_cntrl->fbc_image = NULL; - } - -error_fw_load: - mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR; - wake_up_all(&mhi_cntrl->state_event); -} - -int mhi_download_amss_image(struct mhi_controller *mhi_cntrl) -{ - struct image_info *image_info = mhi_cntrl->fbc_image; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - int ret; - - if (!image_info) - return -EIO; - - ret = mhi_fw_load_bhie(mhi_cntrl, - /* Vector table is the last entry */ - &image_info->mhi_buf[image_info->entries - 1]); - if (ret) { - dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret); - mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR; - wake_up_all(&mhi_cntrl->state_event); - } - - return ret; -} diff --git a/drivers/bus/mhi/core/debugfs.c b/drivers/bus/mhi/core/debugfs.c deleted file mode 100644 index d818586c229d..000000000000 --- a/drivers/bus/mhi/core/debugfs.c +++ /dev/null @@ -1,413 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include "internal.h" - -static int mhi_debugfs_states_show(struct seq_file *m, void *d) -{ - struct mhi_controller *mhi_cntrl = m->private; - - /* states */ - seq_printf(m, "PM state: %s Device: %s MHI state: %s EE: %s wake: %s\n", - to_mhi_pm_state_str(mhi_cntrl->pm_state), - mhi_is_active(mhi_cntrl) ? "Active" : "Inactive", - TO_MHI_STATE_STR(mhi_cntrl->dev_state), - TO_MHI_EXEC_STR(mhi_cntrl->ee), - mhi_cntrl->wake_set ? "true" : "false"); - - /* counters */ - seq_printf(m, "M0: %u M2: %u M3: %u", mhi_cntrl->M0, mhi_cntrl->M2, - mhi_cntrl->M3); - - seq_printf(m, " device wake: %u pending packets: %u\n", - atomic_read(&mhi_cntrl->dev_wake), - atomic_read(&mhi_cntrl->pending_pkts)); - - return 0; -} - -static int mhi_debugfs_events_show(struct seq_file *m, void *d) -{ - struct mhi_controller *mhi_cntrl = m->private; - struct mhi_event *mhi_event; - struct mhi_event_ctxt *er_ctxt; - int i; - - if (!mhi_is_active(mhi_cntrl)) { - seq_puts(m, "Device not ready\n"); - return -ENODEV; - } - - er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; - mhi_event = mhi_cntrl->mhi_event; - for (i = 0; i < mhi_cntrl->total_ev_rings; - i++, er_ctxt++, mhi_event++) { - struct mhi_ring *ring = &mhi_event->ring; - - if (mhi_event->offload_ev) { - seq_printf(m, "Index: %d is an offload event ring\n", - i); - continue; - } - - seq_printf(m, "Index: %d intmod count: %lu time: %lu", - i, (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODC_MASK) >> - EV_CTX_INTMODC_SHIFT, - (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODT_MASK) >> - EV_CTX_INTMODT_SHIFT); - - seq_printf(m, " base: 0x%0llx len: 0x%llx", le64_to_cpu(er_ctxt->rbase), - le64_to_cpu(er_ctxt->rlen)); - - seq_printf(m, " rp: 0x%llx wp: 0x%llx", le64_to_cpu(er_ctxt->rp), - le64_to_cpu(er_ctxt->wp)); - - seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp, - &mhi_event->db_cfg.db_val); - } - - return 0; -} - -static int mhi_debugfs_channels_show(struct seq_file *m, void *d) -{ - struct mhi_controller *mhi_cntrl = m->private; - struct mhi_chan *mhi_chan; - struct mhi_chan_ctxt *chan_ctxt; - int i; - - if (!mhi_is_active(mhi_cntrl)) { - seq_puts(m, "Device not ready\n"); - return -ENODEV; - } - - mhi_chan = mhi_cntrl->mhi_chan; - chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt; - for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { - struct mhi_ring *ring = &mhi_chan->tre_ring; - - if (mhi_chan->offload_ch) { - seq_printf(m, "%s(%u) is an offload channel\n", - mhi_chan->name, mhi_chan->chan); - continue; - } - - if (!mhi_chan->mhi_dev) - continue; - - seq_printf(m, - "%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx", - mhi_chan->name, mhi_chan->chan, (le32_to_cpu(chan_ctxt->chcfg) & - CHAN_CTX_CHSTATE_MASK) >> CHAN_CTX_CHSTATE_SHIFT, - (le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_BRSTMODE_MASK) >> - CHAN_CTX_BRSTMODE_SHIFT, (le32_to_cpu(chan_ctxt->chcfg) & - CHAN_CTX_POLLCFG_MASK) >> CHAN_CTX_POLLCFG_SHIFT); - - seq_printf(m, " type: 0x%x event ring: %u", le32_to_cpu(chan_ctxt->chtype), - le32_to_cpu(chan_ctxt->erindex)); - - seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx", - le64_to_cpu(chan_ctxt->rbase), le64_to_cpu(chan_ctxt->rlen), - le64_to_cpu(chan_ctxt->rp), le64_to_cpu(chan_ctxt->wp)); - - seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 0x%pad\n", - ring->rp, ring->wp, - &mhi_chan->db_cfg.db_val); - } - - return 0; -} - -static int mhi_device_info_show(struct device *dev, void *data) -{ - struct mhi_device *mhi_dev; - - if (dev->bus != &mhi_bus_type) - return 0; 
- - mhi_dev = to_mhi_device(dev); - - seq_printf((struct seq_file *)data, "%s: type: %s dev_wake: %u", - mhi_dev->name, mhi_dev->dev_type ? "Controller" : "Transfer", - mhi_dev->dev_wake); - - /* for transfer device types only */ - if (mhi_dev->dev_type == MHI_DEVICE_XFER) - seq_printf((struct seq_file *)data, " channels: %u(UL)/%u(DL)", - mhi_dev->ul_chan_id, mhi_dev->dl_chan_id); - - seq_puts((struct seq_file *)data, "\n"); - - return 0; -} - -static int mhi_debugfs_devices_show(struct seq_file *m, void *d) -{ - struct mhi_controller *mhi_cntrl = m->private; - - if (!mhi_is_active(mhi_cntrl)) { - seq_puts(m, "Device not ready\n"); - return -ENODEV; - } - - /* Show controller and client(s) info */ - mhi_device_info_show(&mhi_cntrl->mhi_dev->dev, m); - device_for_each_child(&mhi_cntrl->mhi_dev->dev, m, mhi_device_info_show); - - return 0; -} - -static int mhi_debugfs_regdump_show(struct seq_file *m, void *d) -{ - struct mhi_controller *mhi_cntrl = m->private; - enum mhi_state state; - enum mhi_ee_type ee; - int i, ret = -EIO; - u32 val; - void __iomem *mhi_base = mhi_cntrl->regs; - void __iomem *bhi_base = mhi_cntrl->bhi; - void __iomem *bhie_base = mhi_cntrl->bhie; - void __iomem *wake_db = mhi_cntrl->wake_db; - struct { - const char *name; - int offset; - void __iomem *base; - } regs[] = { - { "MHI_REGLEN", MHIREGLEN, mhi_base}, - { "MHI_VER", MHIVER, mhi_base}, - { "MHI_CFG", MHICFG, mhi_base}, - { "MHI_CTRL", MHICTRL, mhi_base}, - { "MHI_STATUS", MHISTATUS, mhi_base}, - { "MHI_WAKE_DB", 0, wake_db}, - { "BHI_EXECENV", BHI_EXECENV, bhi_base}, - { "BHI_STATUS", BHI_STATUS, bhi_base}, - { "BHI_ERRCODE", BHI_ERRCODE, bhi_base}, - { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base}, - { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base}, - { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base}, - { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base}, - { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base}, - { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base}, - { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base}, - { NULL }, - }; - - if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) - return ret; - - seq_printf(m, "Host PM state: %s Device state: %s EE: %s\n", - to_mhi_pm_state_str(mhi_cntrl->pm_state), - TO_MHI_STATE_STR(mhi_cntrl->dev_state), - TO_MHI_EXEC_STR(mhi_cntrl->ee)); - - state = mhi_get_mhi_state(mhi_cntrl); - ee = mhi_get_exec_env(mhi_cntrl); - seq_printf(m, "Device EE: %s state: %s\n", TO_MHI_EXEC_STR(ee), - TO_MHI_STATE_STR(state)); - - for (i = 0; regs[i].name; i++) { - if (!regs[i].base) - continue; - ret = mhi_read_reg(mhi_cntrl, regs[i].base, regs[i].offset, - &val); - if (ret) - continue; - - seq_printf(m, "%s: 0x%x\n", regs[i].name, val); - } - - return 0; -} - -static int mhi_debugfs_device_wake_show(struct seq_file *m, void *d) -{ - struct mhi_controller *mhi_cntrl = m->private; - struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; - - if (!mhi_is_active(mhi_cntrl)) { - seq_puts(m, "Device not ready\n"); - return -ENODEV; - } - - seq_printf(m, - "Wake count: %d\n%s\n", mhi_dev->dev_wake, - "Usage: echo get/put > device_wake to vote/unvote for M0"); - - return 0; -} - -static ssize_t mhi_debugfs_device_wake_write(struct file *file, - const char __user *ubuf, - size_t count, loff_t *ppos) -{ - struct seq_file *m = file->private_data; - struct mhi_controller *mhi_cntrl = m->private; - struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; - char buf[16]; - int ret = -EINVAL; - - if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) - return -EFAULT; - - if (!strncmp(buf, "get", 3)) { - ret = 
mhi_device_get_sync(mhi_dev); - } else if (!strncmp(buf, "put", 3)) { - mhi_device_put(mhi_dev); - ret = 0; - } - - return ret ? ret : count; -} - -static int mhi_debugfs_timeout_ms_show(struct seq_file *m, void *d) -{ - struct mhi_controller *mhi_cntrl = m->private; - - seq_printf(m, "%u ms\n", mhi_cntrl->timeout_ms); - - return 0; -} - -static ssize_t mhi_debugfs_timeout_ms_write(struct file *file, - const char __user *ubuf, - size_t count, loff_t *ppos) -{ - struct seq_file *m = file->private_data; - struct mhi_controller *mhi_cntrl = m->private; - u32 timeout_ms; - - if (kstrtou32_from_user(ubuf, count, 0, &timeout_ms)) - return -EINVAL; - - mhi_cntrl->timeout_ms = timeout_ms; - - return count; -} - -static int mhi_debugfs_states_open(struct inode *inode, struct file *fp) -{ - return single_open(fp, mhi_debugfs_states_show, inode->i_private); -} - -static int mhi_debugfs_events_open(struct inode *inode, struct file *fp) -{ - return single_open(fp, mhi_debugfs_events_show, inode->i_private); -} - -static int mhi_debugfs_channels_open(struct inode *inode, struct file *fp) -{ - return single_open(fp, mhi_debugfs_channels_show, inode->i_private); -} - -static int mhi_debugfs_devices_open(struct inode *inode, struct file *fp) -{ - return single_open(fp, mhi_debugfs_devices_show, inode->i_private); -} - -static int mhi_debugfs_regdump_open(struct inode *inode, struct file *fp) -{ - return single_open(fp, mhi_debugfs_regdump_show, inode->i_private); -} - -static int mhi_debugfs_device_wake_open(struct inode *inode, struct file *fp) -{ - return single_open(fp, mhi_debugfs_device_wake_show, inode->i_private); -} - -static int mhi_debugfs_timeout_ms_open(struct inode *inode, struct file *fp) -{ - return single_open(fp, mhi_debugfs_timeout_ms_show, inode->i_private); -} - -static const struct file_operations debugfs_states_fops = { - .open = mhi_debugfs_states_open, - .release = single_release, - .read = seq_read, -}; - -static const struct file_operations debugfs_events_fops = { - .open = mhi_debugfs_events_open, - .release = single_release, - .read = seq_read, -}; - -static const struct file_operations debugfs_channels_fops = { - .open = mhi_debugfs_channels_open, - .release = single_release, - .read = seq_read, -}; - -static const struct file_operations debugfs_devices_fops = { - .open = mhi_debugfs_devices_open, - .release = single_release, - .read = seq_read, -}; - -static const struct file_operations debugfs_regdump_fops = { - .open = mhi_debugfs_regdump_open, - .release = single_release, - .read = seq_read, -}; - -static const struct file_operations debugfs_device_wake_fops = { - .open = mhi_debugfs_device_wake_open, - .write = mhi_debugfs_device_wake_write, - .release = single_release, - .read = seq_read, -}; - -static const struct file_operations debugfs_timeout_ms_fops = { - .open = mhi_debugfs_timeout_ms_open, - .write = mhi_debugfs_timeout_ms_write, - .release = single_release, - .read = seq_read, -}; - -static struct dentry *mhi_debugfs_root; - -void mhi_create_debugfs(struct mhi_controller *mhi_cntrl) -{ - mhi_cntrl->debugfs_dentry = - debugfs_create_dir(dev_name(&mhi_cntrl->mhi_dev->dev), - mhi_debugfs_root); - - debugfs_create_file("states", 0444, mhi_cntrl->debugfs_dentry, - mhi_cntrl, &debugfs_states_fops); - debugfs_create_file("events", 0444, mhi_cntrl->debugfs_dentry, - mhi_cntrl, &debugfs_events_fops); - debugfs_create_file("channels", 0444, mhi_cntrl->debugfs_dentry, - mhi_cntrl, &debugfs_channels_fops); - debugfs_create_file("devices", 0444, mhi_cntrl->debugfs_dentry, - 
mhi_cntrl, &debugfs_devices_fops); - debugfs_create_file("regdump", 0444, mhi_cntrl->debugfs_dentry, - mhi_cntrl, &debugfs_regdump_fops); - debugfs_create_file("device_wake", 0644, mhi_cntrl->debugfs_dentry, - mhi_cntrl, &debugfs_device_wake_fops); - debugfs_create_file("timeout_ms", 0644, mhi_cntrl->debugfs_dentry, - mhi_cntrl, &debugfs_timeout_ms_fops); -} - -void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl) -{ - debugfs_remove_recursive(mhi_cntrl->debugfs_dentry); - mhi_cntrl->debugfs_dentry = NULL; -} - -void mhi_debugfs_init(void) -{ - mhi_debugfs_root = debugfs_create_dir(mhi_bus_type.name, NULL); -} - -void mhi_debugfs_exit(void) -{ - debugfs_remove_recursive(mhi_debugfs_root); -} diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c deleted file mode 100644 index d8787aaa176b..000000000000 --- a/drivers/bus/mhi/core/init.c +++ /dev/null @@ -1,1431 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "internal.h" - -static DEFINE_IDA(mhi_controller_ida); - -const char * const mhi_ee_str[MHI_EE_MAX] = { - [MHI_EE_PBL] = "PRIMARY BOOTLOADER", - [MHI_EE_SBL] = "SECONDARY BOOTLOADER", - [MHI_EE_AMSS] = "MISSION MODE", - [MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE", - [MHI_EE_WFW] = "WLAN FIRMWARE", - [MHI_EE_PTHRU] = "PASS THROUGH", - [MHI_EE_EDL] = "EMERGENCY DOWNLOAD", - [MHI_EE_FP] = "FLASH PROGRAMMER", - [MHI_EE_DISABLE_TRANSITION] = "DISABLE", - [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED", -}; - -const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = { - [DEV_ST_TRANSITION_PBL] = "PBL", - [DEV_ST_TRANSITION_READY] = "READY", - [DEV_ST_TRANSITION_SBL] = "SBL", - [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE", - [DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER", - [DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR", - [DEV_ST_TRANSITION_DISABLE] = "DISABLE", -}; - -const char * const mhi_state_str[MHI_STATE_MAX] = { - [MHI_STATE_RESET] = "RESET", - [MHI_STATE_READY] = "READY", - [MHI_STATE_M0] = "M0", - [MHI_STATE_M1] = "M1", - [MHI_STATE_M2] = "M2", - [MHI_STATE_M3] = "M3", - [MHI_STATE_M3_FAST] = "M3 FAST", - [MHI_STATE_BHI] = "BHI", - [MHI_STATE_SYS_ERR] = "SYS ERROR", -}; - -const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = { - [MHI_CH_STATE_TYPE_RESET] = "RESET", - [MHI_CH_STATE_TYPE_STOP] = "STOP", - [MHI_CH_STATE_TYPE_START] = "START", -}; - -static const char * const mhi_pm_state_str[] = { - [MHI_PM_STATE_DISABLE] = "DISABLE", - [MHI_PM_STATE_POR] = "POWER ON RESET", - [MHI_PM_STATE_M0] = "M0", - [MHI_PM_STATE_M2] = "M2", - [MHI_PM_STATE_M3_ENTER] = "M?->M3", - [MHI_PM_STATE_M3] = "M3", - [MHI_PM_STATE_M3_EXIT] = "M3->M0", - [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error", - [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect", - [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process", - [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process", - [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect", -}; - -const char *to_mhi_pm_state_str(u32 state) -{ - int index; - - if (state) - index = __fls(state); - - if (!state || index >= ARRAY_SIZE(mhi_pm_state_str)) - return "Invalid State"; - - return mhi_pm_state_str[index]; -} - -static ssize_t serial_number_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct mhi_device *mhi_dev = to_mhi_device(dev); - struct mhi_controller *mhi_cntrl = 
mhi_dev->mhi_cntrl; - - return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n", - mhi_cntrl->serial_number); -} -static DEVICE_ATTR_RO(serial_number); - -static ssize_t oem_pk_hash_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct mhi_device *mhi_dev = to_mhi_device(dev); - struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - int i, cnt = 0; - - for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) - cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, - "OEMPKHASH[%d]: 0x%x\n", i, - mhi_cntrl->oem_pk_hash[i]); - - return cnt; -} -static DEVICE_ATTR_RO(oem_pk_hash); - -static struct attribute *mhi_dev_attrs[] = { - &dev_attr_serial_number.attr, - &dev_attr_oem_pk_hash.attr, - NULL, -}; -ATTRIBUTE_GROUPS(mhi_dev); - -/* MHI protocol requires the transfer ring to be aligned with ring length */ -static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl, - struct mhi_ring *ring, - u64 len) -{ - ring->alloc_size = len + (len - 1); - ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, - &ring->dma_handle, GFP_KERNEL); - if (!ring->pre_aligned) - return -ENOMEM; - - ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1); - ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); - - return 0; -} - -void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) -{ - int i; - struct mhi_event *mhi_event = mhi_cntrl->mhi_event; - - for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { - if (mhi_event->offload_ev) - continue; - - free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); - } - - free_irq(mhi_cntrl->irq[0], mhi_cntrl); -} - -int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) -{ - struct mhi_event *mhi_event = mhi_cntrl->mhi_event; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND; - int i, ret; - - /* if controller driver has set irq_flags, use it */ - if (mhi_cntrl->irq_flags) - irq_flags = mhi_cntrl->irq_flags; - - /* Setup BHI_INTVEC IRQ */ - ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler, - mhi_intvec_threaded_handler, - irq_flags, - "bhi", mhi_cntrl); - if (ret) - return ret; - - for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { - if (mhi_event->offload_ev) - continue; - - if (mhi_event->irq >= mhi_cntrl->nr_irqs) { - dev_err(dev, "irq %d not available for event ring\n", - mhi_event->irq); - ret = -EINVAL; - goto error_request; - } - - ret = request_irq(mhi_cntrl->irq[mhi_event->irq], - mhi_irq_handler, - irq_flags, - "mhi", mhi_event); - if (ret) { - dev_err(dev, "Error requesting irq:%d for ev:%d\n", - mhi_cntrl->irq[mhi_event->irq], i); - goto error_request; - } - } - - return 0; - -error_request: - for (--i, --mhi_event; i >= 0; i--, mhi_event--) { - if (mhi_event->offload_ev) - continue; - - free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); - } - free_irq(mhi_cntrl->irq[0], mhi_cntrl); - - return ret; -} - -void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) -{ - int i; - struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; - struct mhi_cmd *mhi_cmd; - struct mhi_event *mhi_event; - struct mhi_ring *ring; - - mhi_cmd = mhi_cntrl->mhi_cmd; - for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) { - ring = &mhi_cmd->ring; - dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, - ring->pre_aligned, ring->dma_handle); - ring->base = NULL; - ring->iommu_base = 0; - } - - dma_free_coherent(mhi_cntrl->cntrl_dev, - sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, - mhi_ctxt->cmd_ctxt, 
mhi_ctxt->cmd_ctxt_addr); - - mhi_event = mhi_cntrl->mhi_event; - for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { - if (mhi_event->offload_ev) - continue; - - ring = &mhi_event->ring; - dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, - ring->pre_aligned, ring->dma_handle); - ring->base = NULL; - ring->iommu_base = 0; - } - - dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * - mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, - mhi_ctxt->er_ctxt_addr); - - dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * - mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, - mhi_ctxt->chan_ctxt_addr); - - kfree(mhi_ctxt); - mhi_cntrl->mhi_ctxt = NULL; -} - -int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) -{ - struct mhi_ctxt *mhi_ctxt; - struct mhi_chan_ctxt *chan_ctxt; - struct mhi_event_ctxt *er_ctxt; - struct mhi_cmd_ctxt *cmd_ctxt; - struct mhi_chan *mhi_chan; - struct mhi_event *mhi_event; - struct mhi_cmd *mhi_cmd; - u32 tmp; - int ret = -ENOMEM, i; - - atomic_set(&mhi_cntrl->dev_wake, 0); - atomic_set(&mhi_cntrl->pending_pkts, 0); - - mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL); - if (!mhi_ctxt) - return -ENOMEM; - - /* Setup channel ctxt */ - mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, - sizeof(*mhi_ctxt->chan_ctxt) * - mhi_cntrl->max_chan, - &mhi_ctxt->chan_ctxt_addr, - GFP_KERNEL); - if (!mhi_ctxt->chan_ctxt) - goto error_alloc_chan_ctxt; - - mhi_chan = mhi_cntrl->mhi_chan; - chan_ctxt = mhi_ctxt->chan_ctxt; - for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { - /* Skip if it is an offload channel */ - if (mhi_chan->offload_ch) - continue; - - tmp = le32_to_cpu(chan_ctxt->chcfg); - tmp &= ~CHAN_CTX_CHSTATE_MASK; - tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); - tmp &= ~CHAN_CTX_BRSTMODE_MASK; - tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT); - tmp &= ~CHAN_CTX_POLLCFG_MASK; - tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT); - chan_ctxt->chcfg = cpu_to_le32(tmp); - - chan_ctxt->chtype = cpu_to_le32(mhi_chan->type); - chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index); - - mhi_chan->ch_state = MHI_CH_STATE_DISABLED; - mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp; - } - - /* Setup event context */ - mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, - sizeof(*mhi_ctxt->er_ctxt) * - mhi_cntrl->total_ev_rings, - &mhi_ctxt->er_ctxt_addr, - GFP_KERNEL); - if (!mhi_ctxt->er_ctxt) - goto error_alloc_er_ctxt; - - er_ctxt = mhi_ctxt->er_ctxt; - mhi_event = mhi_cntrl->mhi_event; - for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, - mhi_event++) { - struct mhi_ring *ring = &mhi_event->ring; - - /* Skip if it is an offload event */ - if (mhi_event->offload_ev) - continue; - - tmp = le32_to_cpu(er_ctxt->intmod); - tmp &= ~EV_CTX_INTMODC_MASK; - tmp &= ~EV_CTX_INTMODT_MASK; - tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT); - er_ctxt->intmod = cpu_to_le32(tmp); - - er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID); - er_ctxt->msivec = cpu_to_le32(mhi_event->irq); - mhi_event->db_cfg.db_mode = true; - - ring->el_size = sizeof(struct mhi_tre); - ring->len = ring->el_size * ring->elements; - ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); - if (ret) - goto error_alloc_er; - - /* - * If the read pointer equals to the write pointer, then the - * ring is empty - */ - ring->rp = ring->wp = ring->base; - er_ctxt->rbase = cpu_to_le64(ring->iommu_base); - er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; - er_ctxt->rlen = 
cpu_to_le64(ring->len); - ring->ctxt_wp = &er_ctxt->wp; - } - - /* Setup cmd context */ - ret = -ENOMEM; - mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, - sizeof(*mhi_ctxt->cmd_ctxt) * - NR_OF_CMD_RINGS, - &mhi_ctxt->cmd_ctxt_addr, - GFP_KERNEL); - if (!mhi_ctxt->cmd_ctxt) - goto error_alloc_er; - - mhi_cmd = mhi_cntrl->mhi_cmd; - cmd_ctxt = mhi_ctxt->cmd_ctxt; - for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { - struct mhi_ring *ring = &mhi_cmd->ring; - - ring->el_size = sizeof(struct mhi_tre); - ring->elements = CMD_EL_PER_RING; - ring->len = ring->el_size * ring->elements; - ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); - if (ret) - goto error_alloc_cmd; - - ring->rp = ring->wp = ring->base; - cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base); - cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; - cmd_ctxt->rlen = cpu_to_le64(ring->len); - ring->ctxt_wp = &cmd_ctxt->wp; - } - - mhi_cntrl->mhi_ctxt = mhi_ctxt; - - return 0; - -error_alloc_cmd: - for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) { - struct mhi_ring *ring = &mhi_cmd->ring; - - dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, - ring->pre_aligned, ring->dma_handle); - } - dma_free_coherent(mhi_cntrl->cntrl_dev, - sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, - mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); - i = mhi_cntrl->total_ev_rings; - mhi_event = mhi_cntrl->mhi_event + i; - -error_alloc_er: - for (--i, --mhi_event; i >= 0; i--, mhi_event--) { - struct mhi_ring *ring = &mhi_event->ring; - - if (mhi_event->offload_ev) - continue; - - dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, - ring->pre_aligned, ring->dma_handle); - } - dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * - mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, - mhi_ctxt->er_ctxt_addr); - -error_alloc_er_ctxt: - dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * - mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, - mhi_ctxt->chan_ctxt_addr); - -error_alloc_chan_ctxt: - kfree(mhi_ctxt); - - return ret; -} - -int mhi_init_mmio(struct mhi_controller *mhi_cntrl) -{ - u32 val; - int i, ret; - struct mhi_chan *mhi_chan; - struct mhi_event *mhi_event; - void __iomem *base = mhi_cntrl->regs; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - struct { - u32 offset; - u32 mask; - u32 shift; - u32 val; - } reg_info[] = { - { - CCABAP_HIGHER, U32_MAX, 0, - upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), - }, - { - CCABAP_LOWER, U32_MAX, 0, - lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), - }, - { - ECABAP_HIGHER, U32_MAX, 0, - upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), - }, - { - ECABAP_LOWER, U32_MAX, 0, - lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), - }, - { - CRCBAP_HIGHER, U32_MAX, 0, - upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), - }, - { - CRCBAP_LOWER, U32_MAX, 0, - lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), - }, - { - MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, - mhi_cntrl->total_ev_rings, - }, - { - MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, - mhi_cntrl->hw_ev_rings, - }, - { - MHICTRLBASE_HIGHER, U32_MAX, 0, - upper_32_bits(mhi_cntrl->iova_start), - }, - { - MHICTRLBASE_LOWER, U32_MAX, 0, - lower_32_bits(mhi_cntrl->iova_start), - }, - { - MHIDATABASE_HIGHER, U32_MAX, 0, - upper_32_bits(mhi_cntrl->iova_start), - }, - { - MHIDATABASE_LOWER, U32_MAX, 0, - lower_32_bits(mhi_cntrl->iova_start), - }, - { - MHICTRLLIMIT_HIGHER, U32_MAX, 0, - upper_32_bits(mhi_cntrl->iova_stop), - }, - { - MHICTRLLIMIT_LOWER, U32_MAX, 0, - 
lower_32_bits(mhi_cntrl->iova_stop), - }, - { - MHIDATALIMIT_HIGHER, U32_MAX, 0, - upper_32_bits(mhi_cntrl->iova_stop), - }, - { - MHIDATALIMIT_LOWER, U32_MAX, 0, - lower_32_bits(mhi_cntrl->iova_stop), - }, - { 0, 0, 0 } - }; - - dev_dbg(dev, "Initializing MHI registers\n"); - - /* Read channel db offset */ - ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK, - CHDBOFF_CHDBOFF_SHIFT, &val); - if (ret) { - dev_err(dev, "Unable to read CHDBOFF register\n"); - return -EIO; - } - - /* Setup wake db */ - mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); - mhi_cntrl->wake_set = false; - - /* Setup channel db address for each channel in tre_ring */ - mhi_chan = mhi_cntrl->mhi_chan; - for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++) - mhi_chan->tre_ring.db_addr = base + val; - - /* Read event ring db offset */ - ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK, - ERDBOFF_ERDBOFF_SHIFT, &val); - if (ret) { - dev_err(dev, "Unable to read ERDBOFF register\n"); - return -EIO; - } - - /* Setup event db address for each ev_ring */ - mhi_event = mhi_cntrl->mhi_event; - for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { - if (mhi_event->offload_ev) - continue; - - mhi_event->ring.db_addr = base + val; - } - - /* Setup DB register for primary CMD rings */ - mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; - - /* Write to MMIO registers */ - for (i = 0; reg_info[i].offset; i++) - mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, - reg_info[i].mask, reg_info[i].shift, - reg_info[i].val); - - return 0; -} - -void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan) -{ - struct mhi_ring *buf_ring; - struct mhi_ring *tre_ring; - struct mhi_chan_ctxt *chan_ctxt; - u32 tmp; - - buf_ring = &mhi_chan->buf_ring; - tre_ring = &mhi_chan->tre_ring; - chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; - - if (!chan_ctxt->rbase) /* Already uninitialized */ - return; - - dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, - tre_ring->pre_aligned, tre_ring->dma_handle); - vfree(buf_ring->base); - - buf_ring->base = tre_ring->base = NULL; - tre_ring->ctxt_wp = NULL; - chan_ctxt->rbase = 0; - chan_ctxt->rlen = 0; - chan_ctxt->rp = 0; - chan_ctxt->wp = 0; - - tmp = le32_to_cpu(chan_ctxt->chcfg); - tmp &= ~CHAN_CTX_CHSTATE_MASK; - tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); - chan_ctxt->chcfg = cpu_to_le32(tmp); - - /* Update to all cores */ - smp_wmb(); -} - -int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan) -{ - struct mhi_ring *buf_ring; - struct mhi_ring *tre_ring; - struct mhi_chan_ctxt *chan_ctxt; - u32 tmp; - int ret; - - buf_ring = &mhi_chan->buf_ring; - tre_ring = &mhi_chan->tre_ring; - tre_ring->el_size = sizeof(struct mhi_tre); - tre_ring->len = tre_ring->el_size * tre_ring->elements; - chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; - ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); - if (ret) - return -ENOMEM; - - buf_ring->el_size = sizeof(struct mhi_buf_info); - buf_ring->len = buf_ring->el_size * buf_ring->elements; - buf_ring->base = vzalloc(buf_ring->len); - - if (!buf_ring->base) { - dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, - tre_ring->pre_aligned, tre_ring->dma_handle); - return -ENOMEM; - } - - tmp = le32_to_cpu(chan_ctxt->chcfg); - tmp &= ~CHAN_CTX_CHSTATE_MASK; - tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT); - chan_ctxt->chcfg = 
cpu_to_le32(tmp); - - chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base); - chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; - chan_ctxt->rlen = cpu_to_le64(tre_ring->len); - tre_ring->ctxt_wp = &chan_ctxt->wp; - - tre_ring->rp = tre_ring->wp = tre_ring->base; - buf_ring->rp = buf_ring->wp = buf_ring->base; - mhi_chan->db_cfg.db_mode = 1; - - /* Update to all cores */ - smp_wmb(); - - return 0; -} - -static int parse_ev_cfg(struct mhi_controller *mhi_cntrl, - const struct mhi_controller_config *config) -{ - struct mhi_event *mhi_event; - const struct mhi_event_config *event_cfg; - struct device *dev = mhi_cntrl->cntrl_dev; - int i, num; - - num = config->num_events; - mhi_cntrl->total_ev_rings = num; - mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), - GFP_KERNEL); - if (!mhi_cntrl->mhi_event) - return -ENOMEM; - - /* Populate event ring */ - mhi_event = mhi_cntrl->mhi_event; - for (i = 0; i < num; i++) { - event_cfg = &config->event_cfg[i]; - - mhi_event->er_index = i; - mhi_event->ring.elements = event_cfg->num_elements; - mhi_event->intmod = event_cfg->irq_moderation_ms; - mhi_event->irq = event_cfg->irq; - - if (event_cfg->channel != U32_MAX) { - /* This event ring has a dedicated channel */ - mhi_event->chan = event_cfg->channel; - if (mhi_event->chan >= mhi_cntrl->max_chan) { - dev_err(dev, - "Event Ring channel not available\n"); - goto error_ev_cfg; - } - - mhi_event->mhi_chan = - &mhi_cntrl->mhi_chan[mhi_event->chan]; - } - - /* Priority is fixed to 1 for now */ - mhi_event->priority = 1; - - mhi_event->db_cfg.brstmode = event_cfg->mode; - if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) - goto error_ev_cfg; - - if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE) - mhi_event->db_cfg.process_db = mhi_db_brstmode; - else - mhi_event->db_cfg.process_db = mhi_db_brstmode_disable; - - mhi_event->data_type = event_cfg->data_type; - - switch (mhi_event->data_type) { - case MHI_ER_DATA: - mhi_event->process_event = mhi_process_data_event_ring; - break; - case MHI_ER_CTRL: - mhi_event->process_event = mhi_process_ctrl_ev_ring; - break; - default: - dev_err(dev, "Event Ring type not supported\n"); - goto error_ev_cfg; - } - - mhi_event->hw_ring = event_cfg->hardware_event; - if (mhi_event->hw_ring) - mhi_cntrl->hw_ev_rings++; - else - mhi_cntrl->sw_ev_rings++; - - mhi_event->cl_manage = event_cfg->client_managed; - mhi_event->offload_ev = event_cfg->offload_channel; - mhi_event++; - } - - return 0; - -error_ev_cfg: - - kfree(mhi_cntrl->mhi_event); - return -EINVAL; -} - -static int parse_ch_cfg(struct mhi_controller *mhi_cntrl, - const struct mhi_controller_config *config) -{ - const struct mhi_channel_config *ch_cfg; - struct device *dev = mhi_cntrl->cntrl_dev; - int i; - u32 chan; - - mhi_cntrl->max_chan = config->max_channels; - - /* - * The allocation of MHI channels can exceed 32KB in some scenarios, - * so to avoid any memory possible allocation failures, vzalloc is - * used here - */ - mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan * - sizeof(*mhi_cntrl->mhi_chan)); - if (!mhi_cntrl->mhi_chan) - return -ENOMEM; - - INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); - - /* Populate channel configurations */ - for (i = 0; i < config->num_channels; i++) { - struct mhi_chan *mhi_chan; - - ch_cfg = &config->ch_cfg[i]; - - chan = ch_cfg->num; - if (chan >= mhi_cntrl->max_chan) { - dev_err(dev, "Channel %d not available\n", chan); - goto error_chan_cfg; - } - - mhi_chan = &mhi_cntrl->mhi_chan[chan]; - mhi_chan->name = ch_cfg->name; - mhi_chan->chan = chan; - - 
mhi_chan->tre_ring.elements = ch_cfg->num_elements; - if (!mhi_chan->tre_ring.elements) - goto error_chan_cfg; - - /* - * For some channels, local ring length should be bigger than - * the transfer ring length due to internal logical channels - * in device. So host can queue much more buffers than transfer - * ring length. Example, RSC channels should have a larger local - * channel length than transfer ring length. - */ - mhi_chan->buf_ring.elements = ch_cfg->local_elements; - if (!mhi_chan->buf_ring.elements) - mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements; - mhi_chan->er_index = ch_cfg->event_ring; - mhi_chan->dir = ch_cfg->dir; - - /* - * For most channels, chtype is identical to channel directions. - * So, if it is not defined then assign channel direction to - * chtype - */ - mhi_chan->type = ch_cfg->type; - if (!mhi_chan->type) - mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir; - - mhi_chan->ee_mask = ch_cfg->ee_mask; - mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg; - mhi_chan->lpm_notify = ch_cfg->lpm_notify; - mhi_chan->offload_ch = ch_cfg->offload_channel; - mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch; - mhi_chan->pre_alloc = ch_cfg->auto_queue; - mhi_chan->wake_capable = ch_cfg->wake_capable; - - /* - * If MHI host allocates buffers, then the channel direction - * should be DMA_FROM_DEVICE - */ - if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) { - dev_err(dev, "Invalid channel configuration\n"); - goto error_chan_cfg; - } - - /* - * Bi-directional and direction less channel must be an - * offload channel - */ - if ((mhi_chan->dir == DMA_BIDIRECTIONAL || - mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) { - dev_err(dev, "Invalid channel configuration\n"); - goto error_chan_cfg; - } - - if (!mhi_chan->offload_ch) { - mhi_chan->db_cfg.brstmode = ch_cfg->doorbell; - if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) { - dev_err(dev, "Invalid Door bell mode\n"); - goto error_chan_cfg; - } - } - - if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE) - mhi_chan->db_cfg.process_db = mhi_db_brstmode; - else - mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable; - - mhi_chan->configured = true; - - if (mhi_chan->lpm_notify) - list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); - } - - return 0; - -error_chan_cfg: - vfree(mhi_cntrl->mhi_chan); - - return -EINVAL; -} - -static int parse_config(struct mhi_controller *mhi_cntrl, - const struct mhi_controller_config *config) -{ - int ret; - - /* Parse MHI channel configuration */ - ret = parse_ch_cfg(mhi_cntrl, config); - if (ret) - return ret; - - /* Parse MHI event configuration */ - ret = parse_ev_cfg(mhi_cntrl, config); - if (ret) - goto error_ev_cfg; - - mhi_cntrl->timeout_ms = config->timeout_ms; - if (!mhi_cntrl->timeout_ms) - mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; - - mhi_cntrl->bounce_buf = config->use_bounce_buf; - mhi_cntrl->buffer_len = config->buf_len; - if (!mhi_cntrl->buffer_len) - mhi_cntrl->buffer_len = MHI_MAX_MTU; - - /* By default, host is allowed to ring DB in both M0 and M2 states */ - mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2; - if (config->m2_no_db) - mhi_cntrl->db_access &= ~MHI_PM_M2; - - return 0; - -error_ev_cfg: - vfree(mhi_cntrl->mhi_chan); - - return ret; -} - -int mhi_register_controller(struct mhi_controller *mhi_cntrl, - const struct mhi_controller_config *config) -{ - struct mhi_event *mhi_event; - struct mhi_chan *mhi_chan; - struct mhi_cmd *mhi_cmd; - struct mhi_device *mhi_dev; - u32 soc_info; - int ret, i; - - if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || 
!mhi_cntrl->regs || - !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put || - !mhi_cntrl->status_cb || !mhi_cntrl->read_reg || - !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || - !mhi_cntrl->irq || !mhi_cntrl->reg_len) - return -EINVAL; - - ret = parse_config(mhi_cntrl, config); - if (ret) - return -EINVAL; - - mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, - sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); - if (!mhi_cntrl->mhi_cmd) { - ret = -ENOMEM; - goto err_free_event; - } - - INIT_LIST_HEAD(&mhi_cntrl->transition_list); - mutex_init(&mhi_cntrl->pm_mutex); - rwlock_init(&mhi_cntrl->pm_lock); - spin_lock_init(&mhi_cntrl->transition_lock); - spin_lock_init(&mhi_cntrl->wlock); - INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker); - init_waitqueue_head(&mhi_cntrl->state_event); - - mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI); - if (!mhi_cntrl->hiprio_wq) { - dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n"); - ret = -ENOMEM; - goto err_free_cmd; - } - - mhi_cmd = mhi_cntrl->mhi_cmd; - for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) - spin_lock_init(&mhi_cmd->lock); - - mhi_event = mhi_cntrl->mhi_event; - for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { - /* Skip for offload events */ - if (mhi_event->offload_ev) - continue; - - mhi_event->mhi_cntrl = mhi_cntrl; - spin_lock_init(&mhi_event->lock); - if (mhi_event->data_type == MHI_ER_CTRL) - tasklet_init(&mhi_event->task, mhi_ctrl_ev_task, - (ulong)mhi_event); - else - tasklet_init(&mhi_event->task, mhi_ev_task, - (ulong)mhi_event); - } - - mhi_chan = mhi_cntrl->mhi_chan; - for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { - mutex_init(&mhi_chan->mutex); - init_completion(&mhi_chan->completion); - rwlock_init(&mhi_chan->lock); - - /* used in setting bei field of TRE */ - mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; - mhi_chan->intmod = mhi_event->intmod; - } - - if (mhi_cntrl->bounce_buf) { - mhi_cntrl->map_single = mhi_map_single_use_bb; - mhi_cntrl->unmap_single = mhi_unmap_single_use_bb; - } else { - mhi_cntrl->map_single = mhi_map_single_no_bb; - mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; - } - - /* Read the MHI device info */ - ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, - SOC_HW_VERSION_OFFS, &soc_info); - if (ret) - goto err_destroy_wq; - - mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >> - SOC_HW_VERSION_FAM_NUM_SHFT; - mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >> - SOC_HW_VERSION_DEV_NUM_SHFT; - mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >> - SOC_HW_VERSION_MAJOR_VER_SHFT; - mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >> - SOC_HW_VERSION_MINOR_VER_SHFT; - - mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL); - if (mhi_cntrl->index < 0) { - ret = mhi_cntrl->index; - goto err_destroy_wq; - } - - /* Register controller with MHI bus */ - mhi_dev = mhi_alloc_device(mhi_cntrl); - if (IS_ERR(mhi_dev)) { - dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n"); - ret = PTR_ERR(mhi_dev); - goto err_ida_free; - } - - mhi_dev->dev_type = MHI_DEVICE_CONTROLLER; - mhi_dev->mhi_cntrl = mhi_cntrl; - dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index); - mhi_dev->name = dev_name(&mhi_dev->dev); - - /* Init wakeup source */ - device_init_wakeup(&mhi_dev->dev, true); - - ret = device_add(&mhi_dev->dev); - if (ret) - goto err_release_dev; - - mhi_cntrl->mhi_dev = mhi_dev; - - mhi_create_debugfs(mhi_cntrl); - - return 0; - -err_release_dev: - 
put_device(&mhi_dev->dev); -err_ida_free: - ida_free(&mhi_controller_ida, mhi_cntrl->index); -err_destroy_wq: - destroy_workqueue(mhi_cntrl->hiprio_wq); -err_free_cmd: - kfree(mhi_cntrl->mhi_cmd); -err_free_event: - kfree(mhi_cntrl->mhi_event); - vfree(mhi_cntrl->mhi_chan); - - return ret; -} -EXPORT_SYMBOL_GPL(mhi_register_controller); - -void mhi_unregister_controller(struct mhi_controller *mhi_cntrl) -{ - struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; - struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan; - unsigned int i; - - mhi_destroy_debugfs(mhi_cntrl); - - destroy_workqueue(mhi_cntrl->hiprio_wq); - kfree(mhi_cntrl->mhi_cmd); - kfree(mhi_cntrl->mhi_event); - - /* Drop the references to MHI devices created for channels */ - for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { - if (!mhi_chan->mhi_dev) - continue; - - put_device(&mhi_chan->mhi_dev->dev); - } - vfree(mhi_cntrl->mhi_chan); - - device_del(&mhi_dev->dev); - put_device(&mhi_dev->dev); - - ida_free(&mhi_controller_ida, mhi_cntrl->index); -} -EXPORT_SYMBOL_GPL(mhi_unregister_controller); - -struct mhi_controller *mhi_alloc_controller(void) -{ - struct mhi_controller *mhi_cntrl; - - mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL); - - return mhi_cntrl; -} -EXPORT_SYMBOL_GPL(mhi_alloc_controller); - -void mhi_free_controller(struct mhi_controller *mhi_cntrl) -{ - kfree(mhi_cntrl); -} -EXPORT_SYMBOL_GPL(mhi_free_controller); - -int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) -{ - struct device *dev = &mhi_cntrl->mhi_dev->dev; - u32 bhi_off, bhie_off; - int ret; - - mutex_lock(&mhi_cntrl->pm_mutex); - - ret = mhi_init_dev_ctxt(mhi_cntrl); - if (ret) - goto error_dev_ctxt; - - ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off); - if (ret) { - dev_err(dev, "Error getting BHI offset\n"); - goto error_reg_offset; - } - - if (bhi_off >= mhi_cntrl->reg_len) { - dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n", - bhi_off, mhi_cntrl->reg_len); - ret = -EINVAL; - goto error_reg_offset; - } - mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off; - - if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) { - ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, - &bhie_off); - if (ret) { - dev_err(dev, "Error getting BHIE offset\n"); - goto error_reg_offset; - } - - if (bhie_off >= mhi_cntrl->reg_len) { - dev_err(dev, - "BHIe offset: 0x%x is out of range: 0x%zx\n", - bhie_off, mhi_cntrl->reg_len); - ret = -EINVAL; - goto error_reg_offset; - } - mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off; - } - - if (mhi_cntrl->rddm_size) { - /* - * This controller supports RDDM, so we need to manually clear - * BHIE RX registers since POR values are undefined. 
- */ - memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS, - 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + - 4); - /* - * Allocate RDDM table for debugging purpose if specified - */ - mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, - mhi_cntrl->rddm_size); - if (mhi_cntrl->rddm_image) - mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image); - } - - mutex_unlock(&mhi_cntrl->pm_mutex); - - return 0; - -error_reg_offset: - mhi_deinit_dev_ctxt(mhi_cntrl); - -error_dev_ctxt: - mutex_unlock(&mhi_cntrl->pm_mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up); - -void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl) -{ - if (mhi_cntrl->fbc_image) { - mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); - mhi_cntrl->fbc_image = NULL; - } - - if (mhi_cntrl->rddm_image) { - mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); - mhi_cntrl->rddm_image = NULL; - } - - mhi_cntrl->bhi = NULL; - mhi_cntrl->bhie = NULL; - - mhi_deinit_dev_ctxt(mhi_cntrl); -} -EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down); - -static void mhi_release_device(struct device *dev) -{ - struct mhi_device *mhi_dev = to_mhi_device(dev); - - /* - * We need to set the mhi_chan->mhi_dev to NULL here since the MHI - * devices for the channels will only get created if the mhi_dev - * associated with it is NULL. This scenario will happen during the - * controller suspend and resume. - */ - if (mhi_dev->ul_chan) - mhi_dev->ul_chan->mhi_dev = NULL; - - if (mhi_dev->dl_chan) - mhi_dev->dl_chan->mhi_dev = NULL; - - kfree(mhi_dev); -} - -struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl) -{ - struct mhi_device *mhi_dev; - struct device *dev; - - mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); - if (!mhi_dev) - return ERR_PTR(-ENOMEM); - - dev = &mhi_dev->dev; - device_initialize(dev); - dev->bus = &mhi_bus_type; - dev->release = mhi_release_device; - - if (mhi_cntrl->mhi_dev) { - /* for MHI client devices, parent is the MHI controller device */ - dev->parent = &mhi_cntrl->mhi_dev->dev; - } else { - /* for MHI controller device, parent is the bus device (e.g. 
pci device) */ - dev->parent = mhi_cntrl->cntrl_dev; - } - - mhi_dev->mhi_cntrl = mhi_cntrl; - mhi_dev->dev_wake = 0; - - return mhi_dev; -} - -static int mhi_driver_probe(struct device *dev) -{ - struct mhi_device *mhi_dev = to_mhi_device(dev); - struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - struct device_driver *drv = dev->driver; - struct mhi_driver *mhi_drv = to_mhi_driver(drv); - struct mhi_event *mhi_event; - struct mhi_chan *ul_chan = mhi_dev->ul_chan; - struct mhi_chan *dl_chan = mhi_dev->dl_chan; - int ret; - - /* Bring device out of LPM */ - ret = mhi_device_get_sync(mhi_dev); - if (ret) - return ret; - - ret = -EINVAL; - - if (ul_chan) { - /* - * If channel supports LPM notifications then status_cb should - * be provided - */ - if (ul_chan->lpm_notify && !mhi_drv->status_cb) - goto exit_probe; - - /* For non-offload channels then xfer_cb should be provided */ - if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb) - goto exit_probe; - - ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; - } - - ret = -EINVAL; - if (dl_chan) { - /* - * If channel supports LPM notifications then status_cb should - * be provided - */ - if (dl_chan->lpm_notify && !mhi_drv->status_cb) - goto exit_probe; - - /* For non-offload channels then xfer_cb should be provided */ - if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb) - goto exit_probe; - - mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index]; - - /* - * If the channel event ring is managed by client, then - * status_cb must be provided so that the framework can - * notify pending data - */ - if (mhi_event->cl_manage && !mhi_drv->status_cb) - goto exit_probe; - - dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; - } - - /* Call the user provided probe function */ - ret = mhi_drv->probe(mhi_dev, mhi_dev->id); - if (ret) - goto exit_probe; - - mhi_device_put(mhi_dev); - - return ret; - -exit_probe: - mhi_unprepare_from_transfer(mhi_dev); - - mhi_device_put(mhi_dev); - - return ret; -} - -static int mhi_driver_remove(struct device *dev) -{ - struct mhi_device *mhi_dev = to_mhi_device(dev); - struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver); - struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - struct mhi_chan *mhi_chan; - enum mhi_ch_state ch_state[] = { - MHI_CH_STATE_DISABLED, - MHI_CH_STATE_DISABLED - }; - int dir; - - /* Skip if it is a controller device */ - if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) - return 0; - - /* Reset both channels */ - for (dir = 0; dir < 2; dir++) { - mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; - - if (!mhi_chan) - continue; - - /* Wake all threads waiting for completion */ - write_lock_irq(&mhi_chan->lock); - mhi_chan->ccs = MHI_EV_CC_INVALID; - complete_all(&mhi_chan->completion); - write_unlock_irq(&mhi_chan->lock); - - /* Set the channel state to disabled */ - mutex_lock(&mhi_chan->mutex); - write_lock_irq(&mhi_chan->lock); - ch_state[dir] = mhi_chan->ch_state; - mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED; - write_unlock_irq(&mhi_chan->lock); - - /* Reset the non-offload channel */ - if (!mhi_chan->offload_ch) - mhi_reset_chan(mhi_cntrl, mhi_chan); - - mutex_unlock(&mhi_chan->mutex); - } - - mhi_drv->remove(mhi_dev); - - /* De-init channel if it was enabled */ - for (dir = 0; dir < 2; dir++) { - mhi_chan = dir ? 
mhi_dev->ul_chan : mhi_dev->dl_chan; - - if (!mhi_chan) - continue; - - mutex_lock(&mhi_chan->mutex); - - if ((ch_state[dir] == MHI_CH_STATE_ENABLED || - ch_state[dir] == MHI_CH_STATE_STOP) && - !mhi_chan->offload_ch) - mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); - - mhi_chan->ch_state = MHI_CH_STATE_DISABLED; - - mutex_unlock(&mhi_chan->mutex); - } - - while (mhi_dev->dev_wake) - mhi_device_put(mhi_dev); - - return 0; -} - -int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner) -{ - struct device_driver *driver = &mhi_drv->driver; - - if (!mhi_drv->probe || !mhi_drv->remove) - return -EINVAL; - - driver->bus = &mhi_bus_type; - driver->owner = owner; - driver->probe = mhi_driver_probe; - driver->remove = mhi_driver_remove; - - return driver_register(driver); -} -EXPORT_SYMBOL_GPL(__mhi_driver_register); - -void mhi_driver_unregister(struct mhi_driver *mhi_drv) -{ - driver_unregister(&mhi_drv->driver); -} -EXPORT_SYMBOL_GPL(mhi_driver_unregister); - -static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env) -{ - struct mhi_device *mhi_dev = to_mhi_device(dev); - - return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT, - mhi_dev->name); -} - -static int mhi_match(struct device *dev, struct device_driver *drv) -{ - struct mhi_device *mhi_dev = to_mhi_device(dev); - struct mhi_driver *mhi_drv = to_mhi_driver(drv); - const struct mhi_device_id *id; - - /* - * If the device is a controller type then there is no client driver - * associated with it - */ - if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) - return 0; - - for (id = mhi_drv->id_table; id->chan[0]; id++) - if (!strcmp(mhi_dev->name, id->chan)) { - mhi_dev->id = id; - return 1; - } - - return 0; -}; - -struct bus_type mhi_bus_type = { - .name = "mhi", - .dev_name = "mhi", - .match = mhi_match, - .uevent = mhi_uevent, - .dev_groups = mhi_dev_groups, -}; - -static int __init mhi_init(void) -{ - mhi_debugfs_init(); - return bus_register(&mhi_bus_type); -} - -static void __exit mhi_exit(void) -{ - mhi_debugfs_exit(); - bus_unregister(&mhi_bus_type); -} - -postcore_initcall(mhi_init); -module_exit(mhi_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("MHI Host Interface"); diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h deleted file mode 100644 index 37c39bf1c7a9..000000000000 --- a/drivers/bus/mhi/core/internal.h +++ /dev/null @@ -1,723 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
- * - */ - -#ifndef _MHI_INT_H -#define _MHI_INT_H - -#include - -extern struct bus_type mhi_bus_type; - -#define MHIREGLEN (0x0) -#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF) -#define MHIREGLEN_MHIREGLEN_SHIFT (0) - -#define MHIVER (0x8) -#define MHIVER_MHIVER_MASK (0xFFFFFFFF) -#define MHIVER_MHIVER_SHIFT (0) - -#define MHICFG (0x10) -#define MHICFG_NHWER_MASK (0xFF000000) -#define MHICFG_NHWER_SHIFT (24) -#define MHICFG_NER_MASK (0xFF0000) -#define MHICFG_NER_SHIFT (16) -#define MHICFG_NHWCH_MASK (0xFF00) -#define MHICFG_NHWCH_SHIFT (8) -#define MHICFG_NCH_MASK (0xFF) -#define MHICFG_NCH_SHIFT (0) - -#define CHDBOFF (0x18) -#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF) -#define CHDBOFF_CHDBOFF_SHIFT (0) - -#define ERDBOFF (0x20) -#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF) -#define ERDBOFF_ERDBOFF_SHIFT (0) - -#define BHIOFF (0x28) -#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF) -#define BHIOFF_BHIOFF_SHIFT (0) - -#define BHIEOFF (0x2C) -#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF) -#define BHIEOFF_BHIEOFF_SHIFT (0) - -#define DEBUGOFF (0x30) -#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF) -#define DEBUGOFF_DEBUGOFF_SHIFT (0) - -#define MHICTRL (0x38) -#define MHICTRL_MHISTATE_MASK (0x0000FF00) -#define MHICTRL_MHISTATE_SHIFT (8) -#define MHICTRL_RESET_MASK (0x2) -#define MHICTRL_RESET_SHIFT (1) - -#define MHISTATUS (0x48) -#define MHISTATUS_MHISTATE_MASK (0x0000FF00) -#define MHISTATUS_MHISTATE_SHIFT (8) -#define MHISTATUS_SYSERR_MASK (0x4) -#define MHISTATUS_SYSERR_SHIFT (2) -#define MHISTATUS_READY_MASK (0x1) -#define MHISTATUS_READY_SHIFT (0) - -#define CCABAP_LOWER (0x58) -#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF) -#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0) - -#define CCABAP_HIGHER (0x5C) -#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF) -#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0) - -#define ECABAP_LOWER (0x60) -#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF) -#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0) - -#define ECABAP_HIGHER (0x64) -#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF) -#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0) - -#define CRCBAP_LOWER (0x68) -#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF) -#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0) - -#define CRCBAP_HIGHER (0x6C) -#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF) -#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0) - -#define CRDB_LOWER (0x70) -#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF) -#define CRDB_LOWER_CRDB_LOWER_SHIFT (0) - -#define CRDB_HIGHER (0x74) -#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF) -#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0) - -#define MHICTRLBASE_LOWER (0x80) -#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF) -#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0) - -#define MHICTRLBASE_HIGHER (0x84) -#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF) -#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0) - -#define MHICTRLLIMIT_LOWER (0x88) -#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF) -#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0) - -#define MHICTRLLIMIT_HIGHER (0x8C) -#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF) -#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0) - -#define MHIDATABASE_LOWER (0x98) -#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF) -#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0) - -#define MHIDATABASE_HIGHER (0x9C) -#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF) -#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0) - -#define 
MHIDATALIMIT_LOWER (0xA0) -#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF) -#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0) - -#define MHIDATALIMIT_HIGHER (0xA4) -#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF) -#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0) - -/* Host request register */ -#define MHI_SOC_RESET_REQ_OFFSET (0xB0) -#define MHI_SOC_RESET_REQ BIT(0) - -/* MHI BHI offfsets */ -#define BHI_BHIVERSION_MINOR (0x00) -#define BHI_BHIVERSION_MAJOR (0x04) -#define BHI_IMGADDR_LOW (0x08) -#define BHI_IMGADDR_HIGH (0x0C) -#define BHI_IMGSIZE (0x10) -#define BHI_RSVD1 (0x14) -#define BHI_IMGTXDB (0x18) -#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF) -#define BHI_TXDB_SEQNUM_SHFT (0) -#define BHI_RSVD2 (0x1C) -#define BHI_INTVEC (0x20) -#define BHI_RSVD3 (0x24) -#define BHI_EXECENV (0x28) -#define BHI_STATUS (0x2C) -#define BHI_ERRCODE (0x30) -#define BHI_ERRDBG1 (0x34) -#define BHI_ERRDBG2 (0x38) -#define BHI_ERRDBG3 (0x3C) -#define BHI_SERIALNU (0x40) -#define BHI_SBLANTIROLLVER (0x44) -#define BHI_NUMSEG (0x48) -#define BHI_MSMHWID(n) (0x4C + (0x4 * (n))) -#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n))) -#define BHI_RSVD5 (0xC4) -#define BHI_STATUS_MASK (0xC0000000) -#define BHI_STATUS_SHIFT (30) -#define BHI_STATUS_ERROR (3) -#define BHI_STATUS_SUCCESS (2) -#define BHI_STATUS_RESET (0) - -/* MHI BHIE offsets */ -#define BHIE_MSMSOCID_OFFS (0x0000) -#define BHIE_TXVECADDR_LOW_OFFS (0x002C) -#define BHIE_TXVECADDR_HIGH_OFFS (0x0030) -#define BHIE_TXVECSIZE_OFFS (0x0034) -#define BHIE_TXVECDB_OFFS (0x003C) -#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF) -#define BHIE_TXVECDB_SEQNUM_SHFT (0) -#define BHIE_TXVECSTATUS_OFFS (0x0044) -#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) -#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0) -#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000) -#define BHIE_TXVECSTATUS_STATUS_SHFT (30) -#define BHIE_TXVECSTATUS_STATUS_RESET (0x00) -#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02) -#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03) -#define BHIE_RXVECADDR_LOW_OFFS (0x0060) -#define BHIE_RXVECADDR_HIGH_OFFS (0x0064) -#define BHIE_RXVECSIZE_OFFS (0x0068) -#define BHIE_RXVECDB_OFFS (0x0070) -#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF) -#define BHIE_RXVECDB_SEQNUM_SHFT (0) -#define BHIE_RXVECSTATUS_OFFS (0x0078) -#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) -#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0) -#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000) -#define BHIE_RXVECSTATUS_STATUS_SHFT (30) -#define BHIE_RXVECSTATUS_STATUS_RESET (0x00) -#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02) -#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03) - -#define SOC_HW_VERSION_OFFS (0x224) -#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000) -#define SOC_HW_VERSION_FAM_NUM_SHFT (28) -#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000) -#define SOC_HW_VERSION_DEV_NUM_SHFT (16) -#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00) -#define SOC_HW_VERSION_MAJOR_VER_SHFT (8) -#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF) -#define SOC_HW_VERSION_MINOR_VER_SHFT (0) - -#define EV_CTX_RESERVED_MASK GENMASK(7, 0) -#define EV_CTX_INTMODC_MASK GENMASK(15, 8) -#define EV_CTX_INTMODC_SHIFT 8 -#define EV_CTX_INTMODT_MASK GENMASK(31, 16) -#define EV_CTX_INTMODT_SHIFT 16 -struct mhi_event_ctxt { - __le32 intmod; - __le32 ertype; - __le32 msivec; - - __le64 rbase __packed __aligned(4); - __le64 rlen __packed __aligned(4); - __le64 rp __packed __aligned(4); - __le64 wp __packed __aligned(4); -}; - -#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0) -#define 
CHAN_CTX_CHSTATE_SHIFT 0 -#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8) -#define CHAN_CTX_BRSTMODE_SHIFT 8 -#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10) -#define CHAN_CTX_POLLCFG_SHIFT 10 -#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16) -struct mhi_chan_ctxt { - __le32 chcfg; - __le32 chtype; - __le32 erindex; - - __le64 rbase __packed __aligned(4); - __le64 rlen __packed __aligned(4); - __le64 rp __packed __aligned(4); - __le64 wp __packed __aligned(4); -}; - -struct mhi_cmd_ctxt { - __le32 reserved0; - __le32 reserved1; - __le32 reserved2; - - __le64 rbase __packed __aligned(4); - __le64 rlen __packed __aligned(4); - __le64 rp __packed __aligned(4); - __le64 wp __packed __aligned(4); -}; - -struct mhi_ctxt { - struct mhi_event_ctxt *er_ctxt; - struct mhi_chan_ctxt *chan_ctxt; - struct mhi_cmd_ctxt *cmd_ctxt; - dma_addr_t er_ctxt_addr; - dma_addr_t chan_ctxt_addr; - dma_addr_t cmd_ctxt_addr; -}; - -struct mhi_tre { - __le64 ptr; - __le32 dword[2]; -}; - -struct bhi_vec_entry { - u64 dma_addr; - u64 size; -}; - -enum mhi_cmd_type { - MHI_CMD_NOP = 1, - MHI_CMD_RESET_CHAN = 16, - MHI_CMD_STOP_CHAN = 17, - MHI_CMD_START_CHAN = 18, -}; - -/* No operation command */ -#define MHI_TRE_CMD_NOOP_PTR (0) -#define MHI_TRE_CMD_NOOP_DWORD0 (0) -#define MHI_TRE_CMD_NOOP_DWORD1 (cpu_to_le32(MHI_CMD_NOP << 16)) - -/* Channel reset command */ -#define MHI_TRE_CMD_RESET_PTR (0) -#define MHI_TRE_CMD_RESET_DWORD0 (0) -#define MHI_TRE_CMD_RESET_DWORD1(chid) (cpu_to_le32((chid << 24) | \ - (MHI_CMD_RESET_CHAN << 16))) - -/* Channel stop command */ -#define MHI_TRE_CMD_STOP_PTR (0) -#define MHI_TRE_CMD_STOP_DWORD0 (0) -#define MHI_TRE_CMD_STOP_DWORD1(chid) (cpu_to_le32((chid << 24) | \ - (MHI_CMD_STOP_CHAN << 16))) - -/* Channel start command */ -#define MHI_TRE_CMD_START_PTR (0) -#define MHI_TRE_CMD_START_DWORD0 (0) -#define MHI_TRE_CMD_START_DWORD1(chid) (cpu_to_le32((chid << 24) | \ - (MHI_CMD_START_CHAN << 16))) - -#define MHI_TRE_GET_DWORD(tre, word) (le32_to_cpu((tre)->dword[(word)])) -#define MHI_TRE_GET_CMD_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) -#define MHI_TRE_GET_CMD_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) - -/* Event descriptor macros */ -#define MHI_TRE_EV_PTR(ptr) (cpu_to_le64(ptr)) -#define MHI_TRE_EV_DWORD0(code, len) (cpu_to_le32((code << 24) | len)) -#define MHI_TRE_EV_DWORD1(chid, type) (cpu_to_le32((chid << 24) | (type << 16))) -#define MHI_TRE_GET_EV_PTR(tre) (le64_to_cpu((tre)->ptr)) -#define MHI_TRE_GET_EV_CODE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) -#define MHI_TRE_GET_EV_LEN(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFFFF) -#define MHI_TRE_GET_EV_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) -#define MHI_TRE_GET_EV_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) -#define MHI_TRE_GET_EV_STATE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) -#define MHI_TRE_GET_EV_EXECENV(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) -#define MHI_TRE_GET_EV_SEQ(tre) MHI_TRE_GET_DWORD(tre, 0) -#define MHI_TRE_GET_EV_TIME(tre) (MHI_TRE_GET_EV_PTR(tre)) -#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits(MHI_TRE_GET_EV_PTR(tre)) -#define MHI_TRE_GET_EV_VEID(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 16) & 0xFF) -#define MHI_TRE_GET_EV_LINKSPEED(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) -#define MHI_TRE_GET_EV_LINKWIDTH(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFF) - -/* Transfer descriptor macros */ -#define MHI_TRE_DATA_PTR(ptr) (cpu_to_le64(ptr)) -#define MHI_TRE_DATA_DWORD0(len) (cpu_to_le32(len & MHI_MAX_MTU)) -#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) 
(cpu_to_le32((2 << 16) | (bei << 10) \ - | (ieot << 9) | (ieob << 8) | chain)) - -/* RSC transfer descriptor macros */ -#define MHI_RSCTRE_DATA_PTR(ptr, len) (cpu_to_le64(((u64)len << 48) | ptr)) -#define MHI_RSCTRE_DATA_DWORD0(cookie) (cpu_to_le32(cookie)) -#define MHI_RSCTRE_DATA_DWORD1 (cpu_to_le32(MHI_PKT_TYPE_COALESCING << 16)) - -enum mhi_pkt_type { - MHI_PKT_TYPE_INVALID = 0x0, - MHI_PKT_TYPE_NOOP_CMD = 0x1, - MHI_PKT_TYPE_TRANSFER = 0x2, - MHI_PKT_TYPE_COALESCING = 0x8, - MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, - MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, - MHI_PKT_TYPE_START_CHAN_CMD = 0x12, - MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, - MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, - MHI_PKT_TYPE_TX_EVENT = 0x22, - MHI_PKT_TYPE_RSC_TX_EVENT = 0x28, - MHI_PKT_TYPE_EE_EVENT = 0x40, - MHI_PKT_TYPE_TSYNC_EVENT = 0x48, - MHI_PKT_TYPE_BW_REQ_EVENT = 0x50, - MHI_PKT_TYPE_STALE_EVENT, /* internal event */ -}; - -/* MHI transfer completion events */ -enum mhi_ev_ccs { - MHI_EV_CC_INVALID = 0x0, - MHI_EV_CC_SUCCESS = 0x1, - MHI_EV_CC_EOT = 0x2, /* End of transfer event */ - MHI_EV_CC_OVERFLOW = 0x3, - MHI_EV_CC_EOB = 0x4, /* End of block event */ - MHI_EV_CC_OOB = 0x5, /* Out of block event */ - MHI_EV_CC_DB_MODE = 0x6, - MHI_EV_CC_UNDEFINED_ERR = 0x10, - MHI_EV_CC_BAD_TRE = 0x11, -}; - -enum mhi_ch_state { - MHI_CH_STATE_DISABLED = 0x0, - MHI_CH_STATE_ENABLED = 0x1, - MHI_CH_STATE_RUNNING = 0x2, - MHI_CH_STATE_SUSPENDED = 0x3, - MHI_CH_STATE_STOP = 0x4, - MHI_CH_STATE_ERROR = 0x5, -}; - -enum mhi_ch_state_type { - MHI_CH_STATE_TYPE_RESET, - MHI_CH_STATE_TYPE_STOP, - MHI_CH_STATE_TYPE_START, - MHI_CH_STATE_TYPE_MAX, -}; - -extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX]; -#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \ - "INVALID_STATE" : \ - mhi_ch_state_type_str[(state)]) - -#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \ - mode != MHI_DB_BRST_ENABLE) - -extern const char * const mhi_ee_str[MHI_EE_MAX]; -#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \ - "INVALID_EE" : mhi_ee_str[ee]) - -#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \ - ee == MHI_EE_EDL) -#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS) -#define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL) -#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \ - ee == MHI_EE_FP) - -enum dev_st_transition { - DEV_ST_TRANSITION_PBL, - DEV_ST_TRANSITION_READY, - DEV_ST_TRANSITION_SBL, - DEV_ST_TRANSITION_MISSION_MODE, - DEV_ST_TRANSITION_FP, - DEV_ST_TRANSITION_SYS_ERR, - DEV_ST_TRANSITION_DISABLE, - DEV_ST_TRANSITION_MAX, -}; - -extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX]; -#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \ - "INVALID_STATE" : dev_state_tran_str[state]) - -extern const char * const mhi_state_str[MHI_STATE_MAX]; -#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \ - !mhi_state_str[state]) ? 
\ - "INVALID_STATE" : mhi_state_str[state]) - -/* internal power states */ -enum mhi_pm_state { - MHI_PM_STATE_DISABLE, - MHI_PM_STATE_POR, - MHI_PM_STATE_M0, - MHI_PM_STATE_M2, - MHI_PM_STATE_M3_ENTER, - MHI_PM_STATE_M3, - MHI_PM_STATE_M3_EXIT, - MHI_PM_STATE_FW_DL_ERR, - MHI_PM_STATE_SYS_ERR_DETECT, - MHI_PM_STATE_SYS_ERR_PROCESS, - MHI_PM_STATE_SHUTDOWN_PROCESS, - MHI_PM_STATE_LD_ERR_FATAL_DETECT, - MHI_PM_STATE_MAX -}; - -#define MHI_PM_DISABLE BIT(0) -#define MHI_PM_POR BIT(1) -#define MHI_PM_M0 BIT(2) -#define MHI_PM_M2 BIT(3) -#define MHI_PM_M3_ENTER BIT(4) -#define MHI_PM_M3 BIT(5) -#define MHI_PM_M3_EXIT BIT(6) -/* firmware download failure state */ -#define MHI_PM_FW_DL_ERR BIT(7) -#define MHI_PM_SYS_ERR_DETECT BIT(8) -#define MHI_PM_SYS_ERR_PROCESS BIT(9) -#define MHI_PM_SHUTDOWN_PROCESS BIT(10) -/* link not accessible */ -#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11) - -#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ - MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ - MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ - MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) -#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) -#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) -#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \ - mhi_cntrl->db_access) -#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ - MHI_PM_M2 | MHI_PM_M3_EXIT)) -#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2) -#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state) -#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ - MHI_PM_IN_ERROR_STATE(pm_state)) -#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ - (MHI_PM_M3_ENTER | MHI_PM_M3)) - -#define NR_OF_CMD_RINGS 1 -#define CMD_EL_PER_RING 128 -#define PRIMARY_CMD_RING 0 -#define MHI_DEV_WAKE_DB 127 -#define MHI_MAX_MTU 0xffff -#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1) - -enum mhi_er_type { - MHI_ER_TYPE_INVALID = 0x0, - MHI_ER_TYPE_VALID = 0x1, -}; - -struct db_cfg { - bool reset_req; - bool db_mode; - u32 pollcfg; - enum mhi_db_brst_mode brstmode; - dma_addr_t db_val; - void (*process_db)(struct mhi_controller *mhi_cntrl, - struct db_cfg *db_cfg, void __iomem *io_addr, - dma_addr_t db_val); -}; - -struct mhi_pm_transitions { - enum mhi_pm_state from_state; - u32 to_states; -}; - -struct state_transition { - struct list_head node; - enum dev_st_transition state; -}; - -struct mhi_ring { - dma_addr_t dma_handle; - dma_addr_t iommu_base; - __le64 *ctxt_wp; /* point to ctxt wp */ - void *pre_aligned; - void *base; - void *rp; - void *wp; - size_t el_size; - size_t len; - size_t elements; - size_t alloc_size; - void __iomem *db_addr; -}; - -struct mhi_cmd { - struct mhi_ring ring; - spinlock_t lock; -}; - -struct mhi_buf_info { - void *v_addr; - void *bb_addr; - void *wp; - void *cb_buf; - dma_addr_t p_addr; - size_t len; - enum dma_data_direction dir; - bool used; /* Indicates whether the buffer is used or not */ - bool pre_mapped; /* Already pre-mapped by client */ -}; - -struct mhi_event { - struct mhi_controller *mhi_cntrl; - struct mhi_chan *mhi_chan; /* dedicated to channel */ - u32 er_index; - u32 intmod; - u32 irq; - int chan; /* this event ring is dedicated to a channel (optional) */ - u32 priority; - enum mhi_er_data_type data_type; - struct mhi_ring ring; - struct db_cfg db_cfg; - struct tasklet_struct task; - spinlock_t lock; - int (*process_event)(struct 
mhi_controller *mhi_cntrl, - struct mhi_event *mhi_event, - u32 event_quota); - bool hw_ring; - bool cl_manage; - bool offload_ev; /* managed by a device driver */ -}; - -struct mhi_chan { - const char *name; - /* - * Important: When consuming, increment tre_ring first and when - * releasing, decrement buf_ring first. If tre_ring has space, buf_ring - * is guranteed to have space so we do not need to check both rings. - */ - struct mhi_ring buf_ring; - struct mhi_ring tre_ring; - u32 chan; - u32 er_index; - u32 intmod; - enum mhi_ch_type type; - enum dma_data_direction dir; - struct db_cfg db_cfg; - enum mhi_ch_ee_mask ee_mask; - enum mhi_ch_state ch_state; - enum mhi_ev_ccs ccs; - struct mhi_device *mhi_dev; - void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result); - struct mutex mutex; - struct completion completion; - rwlock_t lock; - struct list_head node; - bool lpm_notify; - bool configured; - bool offload_ch; - bool pre_alloc; - bool wake_capable; -}; - -/* Default MHI timeout */ -#define MHI_TIMEOUT_MS (1000) - -/* debugfs related functions */ -#ifdef CONFIG_MHI_BUS_DEBUG -void mhi_create_debugfs(struct mhi_controller *mhi_cntrl); -void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl); -void mhi_debugfs_init(void); -void mhi_debugfs_exit(void); -#else -static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl) -{ -} - -static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl) -{ -} - -static inline void mhi_debugfs_init(void) -{ -} - -static inline void mhi_debugfs_exit(void) -{ -} -#endif - -struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); - -int mhi_destroy_device(struct device *dev, void *data); -void mhi_create_devices(struct mhi_controller *mhi_cntrl); - -int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, - struct image_info **image_info, size_t alloc_size); -void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, - struct image_info *image_info); - -/* Power management APIs */ -enum mhi_pm_state __must_check mhi_tryset_pm_state( - struct mhi_controller *mhi_cntrl, - enum mhi_pm_state state); -const char *to_mhi_pm_state_str(u32 state); -int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, - enum dev_st_transition state); -void mhi_pm_st_worker(struct work_struct *work); -void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl); -int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); -int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); -void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); -int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); -int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); -int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, - enum mhi_cmd_type cmd); -int mhi_download_amss_image(struct mhi_controller *mhi_cntrl); -static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl) -{ - return (mhi_cntrl->dev_state >= MHI_STATE_M0 && - mhi_cntrl->dev_state <= MHI_STATE_M3_FAST); -} - -static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl) -{ - pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); - mhi_cntrl->runtime_get(mhi_cntrl); - mhi_cntrl->runtime_put(mhi_cntrl); -} - -/* Register access methods */ -void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, - void __iomem *db_addr, dma_addr_t db_val); -void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, - struct db_cfg *db_mode, void __iomem *db_addr, - dma_addr_t db_val); -int __must_check 
mhi_read_reg(struct mhi_controller *mhi_cntrl, - void __iomem *base, u32 offset, u32 *out); -int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, - void __iomem *base, u32 offset, u32 mask, - u32 shift, u32 *out); -int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, - void __iomem *base, u32 offset, u32 mask, - u32 shift, u32 val, u32 delayus); -void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, - u32 offset, u32 val); -void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, - u32 offset, u32 mask, u32 shift, u32 val); -void mhi_ring_er_db(struct mhi_event *mhi_event); -void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, - dma_addr_t db_val); -void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); -void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan); - -/* Initialization methods */ -int mhi_init_mmio(struct mhi_controller *mhi_cntrl); -int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); -void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); -int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); -void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); -void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, - struct image_info *img_info); -void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); - -/* Automatically allocate and queue inbound buffers */ -#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0) -int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan, unsigned int flags); - -int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan); -void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan); -void mhi_reset_chan(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan); - -/* Event processing methods */ -void mhi_ctrl_ev_task(unsigned long data); -void mhi_ev_task(unsigned long data); -int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, - struct mhi_event *mhi_event, u32 event_quota); -int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, - struct mhi_event *mhi_event, u32 event_quota); - -/* ISR handlers */ -irqreturn_t mhi_irq_handler(int irq_number, void *dev); -irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev); -irqreturn_t mhi_intvec_handler(int irq_number, void *dev); - -int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, - struct mhi_buf_info *info, enum mhi_flags flags); -int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, - struct mhi_buf_info *buf_info); -int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, - struct mhi_buf_info *buf_info); -void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, - struct mhi_buf_info *buf_info); -void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, - struct mhi_buf_info *buf_info); - -#endif /* _MHI_INT_H */ diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c deleted file mode 100644 index 85f4f7c8d7c6..000000000000 --- a/drivers/bus/mhi/core/main.c +++ /dev/null @@ -1,1687 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "internal.h" - -int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, - void __iomem *base, u32 offset, u32 *out) -{ - return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out); -} - -int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, - void __iomem *base, u32 offset, - u32 mask, u32 shift, u32 *out) -{ - u32 tmp; - int ret; - - ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); - if (ret) - return ret; - - *out = (tmp & mask) >> shift; - - return 0; -} - -int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, - void __iomem *base, u32 offset, - u32 mask, u32 shift, u32 val, u32 delayus) -{ - int ret; - u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus; - - while (retry--) { - ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, shift, - &out); - if (ret) - return ret; - - if (out == val) - return 0; - - fsleep(delayus); - } - - return -ETIMEDOUT; -} - -void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, - u32 offset, u32 val) -{ - mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); -} - -void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, - u32 offset, u32 mask, u32 shift, u32 val) -{ - int ret; - u32 tmp; - - ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); - if (ret) - return; - - tmp &= ~mask; - tmp |= (val << shift); - mhi_write_reg(mhi_cntrl, base, offset, tmp); -} - -void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, - dma_addr_t db_val) -{ - mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val)); - mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val)); -} - -void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, - struct db_cfg *db_cfg, - void __iomem *db_addr, - dma_addr_t db_val) -{ - if (db_cfg->db_mode) { - db_cfg->db_val = db_val; - mhi_write_db(mhi_cntrl, db_addr, db_val); - db_cfg->db_mode = 0; - } -} - -void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, - struct db_cfg *db_cfg, - void __iomem *db_addr, - dma_addr_t db_val) -{ - db_cfg->db_val = db_val; - mhi_write_db(mhi_cntrl, db_addr, db_val); -} - -void mhi_ring_er_db(struct mhi_event *mhi_event) -{ - struct mhi_ring *ring = &mhi_event->ring; - - mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, - ring->db_addr, le64_to_cpu(*ring->ctxt_wp)); -} - -void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) -{ - dma_addr_t db; - struct mhi_ring *ring = &mhi_cmd->ring; - - db = ring->iommu_base + (ring->wp - ring->base); - *ring->ctxt_wp = cpu_to_le64(db); - mhi_write_db(mhi_cntrl, ring->db_addr, db); -} - -void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan) -{ - struct mhi_ring *ring = &mhi_chan->tre_ring; - dma_addr_t db; - - db = ring->iommu_base + (ring->wp - ring->base); - - /* - * Writes to the new ring element must be visible to the hardware - * before letting h/w know there is new element to fetch. - */ - dma_wmb(); - *ring->ctxt_wp = cpu_to_le64(db); - - mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, - ring->db_addr, db); -} - -enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl) -{ - u32 exec; - int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); - - return (ret) ? 
MHI_EE_MAX : exec; -} -EXPORT_SYMBOL_GPL(mhi_get_exec_env); - -enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) -{ - u32 state; - int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, - MHISTATUS_MHISTATE_MASK, - MHISTATUS_MHISTATE_SHIFT, &state); - return ret ? MHI_STATE_MAX : state; -} -EXPORT_SYMBOL_GPL(mhi_get_mhi_state); - -void mhi_soc_reset(struct mhi_controller *mhi_cntrl) -{ - if (mhi_cntrl->reset) { - mhi_cntrl->reset(mhi_cntrl); - return; - } - - /* Generic MHI SoC reset */ - mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET, - MHI_SOC_RESET_REQ); -} -EXPORT_SYMBOL_GPL(mhi_soc_reset); - -int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, - struct mhi_buf_info *buf_info) -{ - buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev, - buf_info->v_addr, buf_info->len, - buf_info->dir); - if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr)) - return -ENOMEM; - - return 0; -} - -int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, - struct mhi_buf_info *buf_info) -{ - void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len, - &buf_info->p_addr, GFP_ATOMIC); - - if (!buf) - return -ENOMEM; - - if (buf_info->dir == DMA_TO_DEVICE) - memcpy(buf, buf_info->v_addr, buf_info->len); - - buf_info->bb_addr = buf; - - return 0; -} - -void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, - struct mhi_buf_info *buf_info) -{ - dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len, - buf_info->dir); -} - -void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, - struct mhi_buf_info *buf_info) -{ - if (buf_info->dir == DMA_FROM_DEVICE) - memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); - - dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len, - buf_info->bb_addr, buf_info->p_addr); -} - -static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, - struct mhi_ring *ring) -{ - int nr_el; - - if (ring->wp < ring->rp) { - nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; - } else { - nr_el = (ring->rp - ring->base) / ring->el_size; - nr_el += ((ring->base + ring->len - ring->wp) / - ring->el_size) - 1; - } - - return nr_el; -} - -static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) -{ - return (addr - ring->iommu_base) + ring->base; -} - -static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, - struct mhi_ring *ring) -{ - ring->wp += ring->el_size; - if (ring->wp >= (ring->base + ring->len)) - ring->wp = ring->base; - /* smp update */ - smp_wmb(); -} - -static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, - struct mhi_ring *ring) -{ - ring->rp += ring->el_size; - if (ring->rp >= (ring->base + ring->len)) - ring->rp = ring->base; - /* smp update */ - smp_wmb(); -} - -static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr) -{ - return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len; -} - -int mhi_destroy_device(struct device *dev, void *data) -{ - struct mhi_chan *ul_chan, *dl_chan; - struct mhi_device *mhi_dev; - struct mhi_controller *mhi_cntrl; - enum mhi_ee_type ee = MHI_EE_MAX; - - if (dev->bus != &mhi_bus_type) - return 0; - - mhi_dev = to_mhi_device(dev); - mhi_cntrl = mhi_dev->mhi_cntrl; - - /* Only destroy virtual devices thats attached to bus */ - if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) - return 0; - - ul_chan = mhi_dev->ul_chan; - dl_chan = mhi_dev->dl_chan; - - /* - * If execution environment is specified, remove only those devices that - * started in them based on ee_mask 
for the channels as we move on to a - * different execution environment - */ - if (data) - ee = *(enum mhi_ee_type *)data; - - /* - * For the suspend and resume case, this function will get called - * without mhi_unregister_controller(). Hence, we need to drop the - * references to mhi_dev created for ul and dl channels. We can - * be sure that there will be no instances of mhi_dev left after - * this. - */ - if (ul_chan) { - if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee))) - return 0; - - put_device(&ul_chan->mhi_dev->dev); - } - - if (dl_chan) { - if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee))) - return 0; - - put_device(&dl_chan->mhi_dev->dev); - } - - dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n", - mhi_dev->name); - - /* Notify the client and remove the device from MHI bus */ - device_del(dev); - put_device(dev); - - return 0; -} - -int mhi_get_free_desc_count(struct mhi_device *mhi_dev, - enum dma_data_direction dir) -{ - struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? - mhi_dev->ul_chan : mhi_dev->dl_chan; - struct mhi_ring *tre_ring = &mhi_chan->tre_ring; - - return get_nr_avail_ring_elements(mhi_cntrl, tre_ring); -} -EXPORT_SYMBOL_GPL(mhi_get_free_desc_count); - -void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason) -{ - struct mhi_driver *mhi_drv; - - if (!mhi_dev->dev.driver) - return; - - mhi_drv = to_mhi_driver(mhi_dev->dev.driver); - - if (mhi_drv->status_cb) - mhi_drv->status_cb(mhi_dev, cb_reason); -} -EXPORT_SYMBOL_GPL(mhi_notify); - -/* Bind MHI channels to MHI devices */ -void mhi_create_devices(struct mhi_controller *mhi_cntrl) -{ - struct mhi_chan *mhi_chan; - struct mhi_device *mhi_dev; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - int i, ret; - - mhi_chan = mhi_cntrl->mhi_chan; - for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { - if (!mhi_chan->configured || mhi_chan->mhi_dev || - !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) - continue; - mhi_dev = mhi_alloc_device(mhi_cntrl); - if (IS_ERR(mhi_dev)) - return; - - mhi_dev->dev_type = MHI_DEVICE_XFER; - switch (mhi_chan->dir) { - case DMA_TO_DEVICE: - mhi_dev->ul_chan = mhi_chan; - mhi_dev->ul_chan_id = mhi_chan->chan; - break; - case DMA_FROM_DEVICE: - /* We use dl_chan as offload channels */ - mhi_dev->dl_chan = mhi_chan; - mhi_dev->dl_chan_id = mhi_chan->chan; - break; - default: - dev_err(dev, "Direction not supported\n"); - put_device(&mhi_dev->dev); - return; - } - - get_device(&mhi_dev->dev); - mhi_chan->mhi_dev = mhi_dev; - - /* Check next channel if it matches */ - if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { - if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { - i++; - mhi_chan++; - if (mhi_chan->dir == DMA_TO_DEVICE) { - mhi_dev->ul_chan = mhi_chan; - mhi_dev->ul_chan_id = mhi_chan->chan; - } else { - mhi_dev->dl_chan = mhi_chan; - mhi_dev->dl_chan_id = mhi_chan->chan; - } - get_device(&mhi_dev->dev); - mhi_chan->mhi_dev = mhi_dev; - } - } - - /* Channel name is same for both UL and DL */ - mhi_dev->name = mhi_chan->name; - dev_set_name(&mhi_dev->dev, "%s_%s", - dev_name(&mhi_cntrl->mhi_dev->dev), - mhi_dev->name); - - /* Init wakeup source if available */ - if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable) - device_init_wakeup(&mhi_dev->dev, true); - - ret = device_add(&mhi_dev->dev); - if (ret) - put_device(&mhi_dev->dev); - } -} - -irqreturn_t mhi_irq_handler(int irq_number, void *dev) -{ - struct mhi_event *mhi_event = dev; - struct mhi_controller *mhi_cntrl = 
mhi_event->mhi_cntrl; - struct mhi_event_ctxt *er_ctxt = - &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; - struct mhi_ring *ev_ring = &mhi_event->ring; - dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); - void *dev_rp; - - if (!is_valid_ring_ptr(ev_ring, ptr)) { - dev_err(&mhi_cntrl->mhi_dev->dev, - "Event ring rp points outside of the event ring\n"); - return IRQ_HANDLED; - } - - dev_rp = mhi_to_virtual(ev_ring, ptr); - - /* Only proceed if event ring has pending events */ - if (ev_ring->rp == dev_rp) - return IRQ_HANDLED; - - /* For client managed event ring, notify pending data */ - if (mhi_event->cl_manage) { - struct mhi_chan *mhi_chan = mhi_event->mhi_chan; - struct mhi_device *mhi_dev = mhi_chan->mhi_dev; - - if (mhi_dev) - mhi_notify(mhi_dev, MHI_CB_PENDING_DATA); - } else { - tasklet_schedule(&mhi_event->task); - } - - return IRQ_HANDLED; -} - -irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv) -{ - struct mhi_controller *mhi_cntrl = priv; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - enum mhi_state state; - enum mhi_pm_state pm_state = 0; - enum mhi_ee_type ee; - - write_lock_irq(&mhi_cntrl->pm_lock); - if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { - write_unlock_irq(&mhi_cntrl->pm_lock); - goto exit_intvec; - } - - state = mhi_get_mhi_state(mhi_cntrl); - ee = mhi_get_exec_env(mhi_cntrl); - dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n", - TO_MHI_EXEC_STR(mhi_cntrl->ee), - TO_MHI_STATE_STR(mhi_cntrl->dev_state), - TO_MHI_EXEC_STR(ee), TO_MHI_STATE_STR(state)); - - if (state == MHI_STATE_SYS_ERR) { - dev_dbg(dev, "System error detected\n"); - pm_state = mhi_tryset_pm_state(mhi_cntrl, - MHI_PM_SYS_ERR_DETECT); - } - write_unlock_irq(&mhi_cntrl->pm_lock); - - if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee) - goto exit_intvec; - - switch (ee) { - case MHI_EE_RDDM: - /* proceed if power down is not already in progress */ - if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) { - mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); - mhi_cntrl->ee = ee; - wake_up_all(&mhi_cntrl->state_event); - } - break; - case MHI_EE_PBL: - case MHI_EE_EDL: - case MHI_EE_PTHRU: - mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR); - mhi_cntrl->ee = ee; - wake_up_all(&mhi_cntrl->state_event); - mhi_pm_sys_err_handler(mhi_cntrl); - break; - default: - wake_up_all(&mhi_cntrl->state_event); - mhi_pm_sys_err_handler(mhi_cntrl); - break; - } - -exit_intvec: - - return IRQ_HANDLED; -} - -irqreturn_t mhi_intvec_handler(int irq_number, void *dev) -{ - struct mhi_controller *mhi_cntrl = dev; - - /* Wake up events waiting for state change */ - wake_up_all(&mhi_cntrl->state_event); - - return IRQ_WAKE_THREAD; -} - -static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, - struct mhi_ring *ring) -{ - dma_addr_t ctxt_wp; - - /* Update the WP */ - ring->wp += ring->el_size; - ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size; - - if (ring->wp >= (ring->base + ring->len)) { - ring->wp = ring->base; - ctxt_wp = ring->iommu_base; - } - - *ring->ctxt_wp = cpu_to_le64(ctxt_wp); - - /* Update the RP */ - ring->rp += ring->el_size; - if (ring->rp >= (ring->base + ring->len)) - ring->rp = ring->base; - - /* Update to all cores */ - smp_wmb(); -} - -static int parse_xfer_event(struct mhi_controller *mhi_cntrl, - struct mhi_tre *event, - struct mhi_chan *mhi_chan) -{ - struct mhi_ring *buf_ring, *tre_ring; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - struct mhi_result result; - unsigned long flags = 0; - u32 ev_code; - - ev_code = 
MHI_TRE_GET_EV_CODE(event); - buf_ring = &mhi_chan->buf_ring; - tre_ring = &mhi_chan->tre_ring; - - result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? - -EOVERFLOW : 0; - - /* - * If it's a DB Event then we need to grab the lock - * with preemption disabled and as a write because we - * have to update db register and there are chances that - * another thread could be doing the same. - */ - if (ev_code >= MHI_EV_CC_OOB) - write_lock_irqsave(&mhi_chan->lock, flags); - else - read_lock_bh(&mhi_chan->lock); - - if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) - goto end_process_tx_event; - - switch (ev_code) { - case MHI_EV_CC_OVERFLOW: - case MHI_EV_CC_EOB: - case MHI_EV_CC_EOT: - { - dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); - struct mhi_tre *local_rp, *ev_tre; - void *dev_rp; - struct mhi_buf_info *buf_info; - u16 xfer_len; - - if (!is_valid_ring_ptr(tre_ring, ptr)) { - dev_err(&mhi_cntrl->mhi_dev->dev, - "Event element points outside of the tre ring\n"); - break; - } - /* Get the TRB this event points to */ - ev_tre = mhi_to_virtual(tre_ring, ptr); - - dev_rp = ev_tre + 1; - if (dev_rp >= (tre_ring->base + tre_ring->len)) - dev_rp = tre_ring->base; - - result.dir = mhi_chan->dir; - - local_rp = tre_ring->rp; - while (local_rp != dev_rp) { - buf_info = buf_ring->rp; - /* If it's the last TRE, get length from the event */ - if (local_rp == ev_tre) - xfer_len = MHI_TRE_GET_EV_LEN(event); - else - xfer_len = buf_info->len; - - /* Unmap if it's not pre-mapped by client */ - if (likely(!buf_info->pre_mapped)) - mhi_cntrl->unmap_single(mhi_cntrl, buf_info); - - result.buf_addr = buf_info->cb_buf; - - /* truncate to buf len if xfer_len is larger */ - result.bytes_xferd = - min_t(u16, xfer_len, buf_info->len); - mhi_del_ring_element(mhi_cntrl, buf_ring); - mhi_del_ring_element(mhi_cntrl, tre_ring); - local_rp = tre_ring->rp; - - /* notify client */ - mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); - - if (mhi_chan->dir == DMA_TO_DEVICE) { - atomic_dec(&mhi_cntrl->pending_pkts); - /* Release the reference got from mhi_queue() */ - mhi_cntrl->runtime_put(mhi_cntrl); - } - - /* - * Recycle the buffer if buffer is pre-allocated, - * if there is an error, not much we can do apart - * from dropping the packet - */ - if (mhi_chan->pre_alloc) { - if (mhi_queue_buf(mhi_chan->mhi_dev, - mhi_chan->dir, - buf_info->cb_buf, - buf_info->len, MHI_EOT)) { - dev_err(dev, - "Error recycling buffer for chan:%d\n", - mhi_chan->chan); - kfree(buf_info->cb_buf); - } - } - } - break; - } /* CC_EOT */ - case MHI_EV_CC_OOB: - case MHI_EV_CC_DB_MODE: - { - unsigned long pm_lock_flags; - - mhi_chan->db_cfg.db_mode = 1; - read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags); - if (tre_ring->wp != tre_ring->rp && - MHI_DB_ACCESS_VALID(mhi_cntrl)) { - mhi_ring_chan_db(mhi_cntrl, mhi_chan); - } - read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags); - break; - } - case MHI_EV_CC_BAD_TRE: - default: - dev_err(dev, "Unknown event 0x%x\n", ev_code); - break; - } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */ - -end_process_tx_event: - if (ev_code >= MHI_EV_CC_OOB) - write_unlock_irqrestore(&mhi_chan->lock, flags); - else - read_unlock_bh(&mhi_chan->lock); - - return 0; -} - -static int parse_rsc_event(struct mhi_controller *mhi_cntrl, - struct mhi_tre *event, - struct mhi_chan *mhi_chan) -{ - struct mhi_ring *buf_ring, *tre_ring; - struct mhi_buf_info *buf_info; - struct mhi_result result; - int ev_code; - u32 cookie; /* offset to local descriptor */ - u16 xfer_len; - - buf_ring = &mhi_chan->buf_ring; - 
tre_ring = &mhi_chan->tre_ring; - - ev_code = MHI_TRE_GET_EV_CODE(event); - cookie = MHI_TRE_GET_EV_COOKIE(event); - xfer_len = MHI_TRE_GET_EV_LEN(event); - - /* Received out of bound cookie */ - WARN_ON(cookie >= buf_ring->len); - - buf_info = buf_ring->base + cookie; - - result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? - -EOVERFLOW : 0; - - /* truncate to buf len if xfer_len is larger */ - result.bytes_xferd = min_t(u16, xfer_len, buf_info->len); - result.buf_addr = buf_info->cb_buf; - result.dir = mhi_chan->dir; - - read_lock_bh(&mhi_chan->lock); - - if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) - goto end_process_rsc_event; - - WARN_ON(!buf_info->used); - - /* notify the client */ - mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); - - /* - * Note: We're arbitrarily incrementing RP even though, completion - * packet we processed might not be the same one, reason we can do this - * is because device guaranteed to cache descriptors in order it - * receive, so even though completion event is different we can re-use - * all descriptors in between. - * Example: - * Transfer Ring has descriptors: A, B, C, D - * Last descriptor host queue is D (WP) and first descriptor - * host queue is A (RP). - * The completion event we just serviced is descriptor C. - * Then we can safely queue descriptors to replace A, B, and C - * even though host did not receive any completions. - */ - mhi_del_ring_element(mhi_cntrl, tre_ring); - buf_info->used = false; - -end_process_rsc_event: - read_unlock_bh(&mhi_chan->lock); - - return 0; -} - -static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, - struct mhi_tre *tre) -{ - dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); - struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; - struct mhi_ring *mhi_ring = &cmd_ring->ring; - struct mhi_tre *cmd_pkt; - struct mhi_chan *mhi_chan; - u32 chan; - - if (!is_valid_ring_ptr(mhi_ring, ptr)) { - dev_err(&mhi_cntrl->mhi_dev->dev, - "Event element points outside of the cmd ring\n"); - return; - } - - cmd_pkt = mhi_to_virtual(mhi_ring, ptr); - - chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); - - if (chan < mhi_cntrl->max_chan && - mhi_cntrl->mhi_chan[chan].configured) { - mhi_chan = &mhi_cntrl->mhi_chan[chan]; - write_lock_bh(&mhi_chan->lock); - mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); - complete(&mhi_chan->completion); - write_unlock_bh(&mhi_chan->lock); - } else { - dev_err(&mhi_cntrl->mhi_dev->dev, - "Completion packet for invalid channel ID: %d\n", chan); - } - - mhi_del_ring_element(mhi_cntrl, mhi_ring); -} - -int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, - struct mhi_event *mhi_event, - u32 event_quota) -{ - struct mhi_tre *dev_rp, *local_rp; - struct mhi_ring *ev_ring = &mhi_event->ring; - struct mhi_event_ctxt *er_ctxt = - &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; - struct mhi_chan *mhi_chan; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - u32 chan; - int count = 0; - dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); - - /* - * This is a quick check to avoid unnecessary event processing - * in case MHI is already in error state, but it's still possible - * to transition to error state while processing events - */ - if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) - return -EIO; - - if (!is_valid_ring_ptr(ev_ring, ptr)) { - dev_err(&mhi_cntrl->mhi_dev->dev, - "Event ring rp points outside of the event ring\n"); - return -EIO; - } - - dev_rp = mhi_to_virtual(ev_ring, ptr); - local_rp = ev_ring->rp; - - while (dev_rp != local_rp) { - enum mhi_pkt_type type = 
MHI_TRE_GET_EV_TYPE(local_rp); - - switch (type) { - case MHI_PKT_TYPE_BW_REQ_EVENT: - { - struct mhi_link_info *link_info; - - link_info = &mhi_cntrl->mhi_link_info; - write_lock_irq(&mhi_cntrl->pm_lock); - link_info->target_link_speed = - MHI_TRE_GET_EV_LINKSPEED(local_rp); - link_info->target_link_width = - MHI_TRE_GET_EV_LINKWIDTH(local_rp); - write_unlock_irq(&mhi_cntrl->pm_lock); - dev_dbg(dev, "Received BW_REQ event\n"); - mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ); - break; - } - case MHI_PKT_TYPE_STATE_CHANGE_EVENT: - { - enum mhi_state new_state; - - new_state = MHI_TRE_GET_EV_STATE(local_rp); - - dev_dbg(dev, "State change event to state: %s\n", - TO_MHI_STATE_STR(new_state)); - - switch (new_state) { - case MHI_STATE_M0: - mhi_pm_m0_transition(mhi_cntrl); - break; - case MHI_STATE_M1: - mhi_pm_m1_transition(mhi_cntrl); - break; - case MHI_STATE_M3: - mhi_pm_m3_transition(mhi_cntrl); - break; - case MHI_STATE_SYS_ERR: - { - enum mhi_pm_state pm_state; - - dev_dbg(dev, "System error detected\n"); - write_lock_irq(&mhi_cntrl->pm_lock); - pm_state = mhi_tryset_pm_state(mhi_cntrl, - MHI_PM_SYS_ERR_DETECT); - write_unlock_irq(&mhi_cntrl->pm_lock); - if (pm_state == MHI_PM_SYS_ERR_DETECT) - mhi_pm_sys_err_handler(mhi_cntrl); - break; - } - default: - dev_err(dev, "Invalid state: %s\n", - TO_MHI_STATE_STR(new_state)); - } - - break; - } - case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: - mhi_process_cmd_completion(mhi_cntrl, local_rp); - break; - case MHI_PKT_TYPE_EE_EVENT: - { - enum dev_st_transition st = DEV_ST_TRANSITION_MAX; - enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp); - - dev_dbg(dev, "Received EE event: %s\n", - TO_MHI_EXEC_STR(event)); - switch (event) { - case MHI_EE_SBL: - st = DEV_ST_TRANSITION_SBL; - break; - case MHI_EE_WFW: - case MHI_EE_AMSS: - st = DEV_ST_TRANSITION_MISSION_MODE; - break; - case MHI_EE_FP: - st = DEV_ST_TRANSITION_FP; - break; - case MHI_EE_RDDM: - mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); - write_lock_irq(&mhi_cntrl->pm_lock); - mhi_cntrl->ee = event; - write_unlock_irq(&mhi_cntrl->pm_lock); - wake_up_all(&mhi_cntrl->state_event); - break; - default: - dev_err(dev, - "Unhandled EE event: 0x%x\n", type); - } - if (st != DEV_ST_TRANSITION_MAX) - mhi_queue_state_transition(mhi_cntrl, st); - - break; - } - case MHI_PKT_TYPE_TX_EVENT: - chan = MHI_TRE_GET_EV_CHID(local_rp); - - WARN_ON(chan >= mhi_cntrl->max_chan); - - /* - * Only process the event ring elements whose channel - * ID is within the maximum supported range. 
- */ - if (chan < mhi_cntrl->max_chan) { - mhi_chan = &mhi_cntrl->mhi_chan[chan]; - if (!mhi_chan->configured) - break; - parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); - event_quota--; - } - break; - default: - dev_err(dev, "Unhandled event type: %d\n", type); - break; - } - - mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); - local_rp = ev_ring->rp; - - ptr = le64_to_cpu(er_ctxt->rp); - if (!is_valid_ring_ptr(ev_ring, ptr)) { - dev_err(&mhi_cntrl->mhi_dev->dev, - "Event ring rp points outside of the event ring\n"); - return -EIO; - } - - dev_rp = mhi_to_virtual(ev_ring, ptr); - count++; - } - - read_lock_bh(&mhi_cntrl->pm_lock); - if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) - mhi_ring_er_db(mhi_event); - read_unlock_bh(&mhi_cntrl->pm_lock); - - return count; -} - -int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, - struct mhi_event *mhi_event, - u32 event_quota) -{ - struct mhi_tre *dev_rp, *local_rp; - struct mhi_ring *ev_ring = &mhi_event->ring; - struct mhi_event_ctxt *er_ctxt = - &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; - int count = 0; - u32 chan; - struct mhi_chan *mhi_chan; - dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); - - if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) - return -EIO; - - if (!is_valid_ring_ptr(ev_ring, ptr)) { - dev_err(&mhi_cntrl->mhi_dev->dev, - "Event ring rp points outside of the event ring\n"); - return -EIO; - } - - dev_rp = mhi_to_virtual(ev_ring, ptr); - local_rp = ev_ring->rp; - - while (dev_rp != local_rp && event_quota > 0) { - enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); - - chan = MHI_TRE_GET_EV_CHID(local_rp); - - WARN_ON(chan >= mhi_cntrl->max_chan); - - /* - * Only process the event ring elements whose channel - * ID is within the maximum supported range. - */ - if (chan < mhi_cntrl->max_chan && - mhi_cntrl->mhi_chan[chan].configured) { - mhi_chan = &mhi_cntrl->mhi_chan[chan]; - - if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { - parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); - event_quota--; - } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) { - parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); - event_quota--; - } - } - - mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); - local_rp = ev_ring->rp; - - ptr = le64_to_cpu(er_ctxt->rp); - if (!is_valid_ring_ptr(ev_ring, ptr)) { - dev_err(&mhi_cntrl->mhi_dev->dev, - "Event ring rp points outside of the event ring\n"); - return -EIO; - } - - dev_rp = mhi_to_virtual(ev_ring, ptr); - count++; - } - read_lock_bh(&mhi_cntrl->pm_lock); - if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) - mhi_ring_er_db(mhi_event); - read_unlock_bh(&mhi_cntrl->pm_lock); - - return count; -} - -void mhi_ev_task(unsigned long data) -{ - struct mhi_event *mhi_event = (struct mhi_event *)data; - struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; - - /* process all pending events */ - spin_lock_bh(&mhi_event->lock); - mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); - spin_unlock_bh(&mhi_event->lock); -} - -void mhi_ctrl_ev_task(unsigned long data) -{ - struct mhi_event *mhi_event = (struct mhi_event *)data; - struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - enum mhi_state state; - enum mhi_pm_state pm_state = 0; - int ret; - - /* - * We can check PM state w/o a lock here because there is no way - * PM state can change from reg access valid to no access while this - * thread being executed. 
- */ - if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { - /* - * We may have a pending event but not allowed to - * process it since we are probably in a suspended state, - * so trigger a resume. - */ - mhi_trigger_resume(mhi_cntrl); - - return; - } - - /* Process ctrl events */ - ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); - - /* - * We received an IRQ but no events to process, maybe device went to - * SYS_ERR state? Check the state to confirm. - */ - if (!ret) { - write_lock_irq(&mhi_cntrl->pm_lock); - state = mhi_get_mhi_state(mhi_cntrl); - if (state == MHI_STATE_SYS_ERR) { - dev_dbg(dev, "System error detected\n"); - pm_state = mhi_tryset_pm_state(mhi_cntrl, - MHI_PM_SYS_ERR_DETECT); - } - write_unlock_irq(&mhi_cntrl->pm_lock); - if (pm_state == MHI_PM_SYS_ERR_DETECT) - mhi_pm_sys_err_handler(mhi_cntrl); - } -} - -static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl, - struct mhi_ring *ring) -{ - void *tmp = ring->wp + ring->el_size; - - if (tmp >= (ring->base + ring->len)) - tmp = ring->base; - - return (tmp == ring->rp); -} - -static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, - enum dma_data_direction dir, enum mhi_flags mflags) -{ - struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : - mhi_dev->dl_chan; - struct mhi_ring *tre_ring = &mhi_chan->tre_ring; - unsigned long flags; - int ret; - - if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) - return -EIO; - - read_lock_irqsave(&mhi_cntrl->pm_lock, flags); - - ret = mhi_is_ring_full(mhi_cntrl, tre_ring); - if (unlikely(ret)) { - ret = -EAGAIN; - goto exit_unlock; - } - - ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags); - if (unlikely(ret)) - goto exit_unlock; - - /* Packet is queued, take a usage ref to exit M3 if necessary - * for host->device buffer, balanced put is done on buffer completion - * for device->host buffer, balanced put is after ringing the DB - */ - mhi_cntrl->runtime_get(mhi_cntrl); - - /* Assert dev_wake (to exit/prevent M1/M2)*/ - mhi_cntrl->wake_toggle(mhi_cntrl); - - if (mhi_chan->dir == DMA_TO_DEVICE) - atomic_inc(&mhi_cntrl->pending_pkts); - - if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) - mhi_ring_chan_db(mhi_cntrl, mhi_chan); - - if (dir == DMA_FROM_DEVICE) - mhi_cntrl->runtime_put(mhi_cntrl); - -exit_unlock: - read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); - - return ret; -} - -int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, - struct sk_buff *skb, size_t len, enum mhi_flags mflags) -{ - struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : - mhi_dev->dl_chan; - struct mhi_buf_info buf_info = { }; - - buf_info.v_addr = skb->data; - buf_info.cb_buf = skb; - buf_info.len = len; - - if (unlikely(mhi_chan->pre_alloc)) - return -EINVAL; - - return mhi_queue(mhi_dev, &buf_info, dir, mflags); -} -EXPORT_SYMBOL_GPL(mhi_queue_skb); - -int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, - struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags) -{ - struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
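For reference, the full-ring test in mhi_is_ring_full() above is the classic circular-buffer check that always keeps one element free: if advancing the write pointer by one element lands on the read pointer, the ring is treated as full. A standalone sketch of the same logic, using plain pointers in place of the driver's struct mhi_ring (hypothetical names, not part of this patch):

	/* Simplified illustration only; not the driver's API. */
	#include <stdbool.h>
	#include <stddef.h>

	struct ring {
		char *base;	/* start of the ring buffer */
		size_t len;	/* total size in bytes */
		size_t el_size;	/* element size in bytes */
		char *rp;	/* read pointer */
		char *wp;	/* write pointer */
	};

	static bool ring_is_full(const struct ring *r)
	{
		char *next = r->wp + r->el_size;

		if (next >= r->base + r->len)
			next = r->base;		/* wrap around */

		return next == r->rp;		/* one slot is always left unused */
	}
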
mhi_dev->ul_chan : - mhi_dev->dl_chan; - struct mhi_buf_info buf_info = { }; - - buf_info.p_addr = mhi_buf->dma_addr; - buf_info.cb_buf = mhi_buf; - buf_info.pre_mapped = true; - buf_info.len = len; - - if (unlikely(mhi_chan->pre_alloc)) - return -EINVAL; - - return mhi_queue(mhi_dev, &buf_info, dir, mflags); -} -EXPORT_SYMBOL_GPL(mhi_queue_dma); - -int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, - struct mhi_buf_info *info, enum mhi_flags flags) -{ - struct mhi_ring *buf_ring, *tre_ring; - struct mhi_tre *mhi_tre; - struct mhi_buf_info *buf_info; - int eot, eob, chain, bei; - int ret; - - buf_ring = &mhi_chan->buf_ring; - tre_ring = &mhi_chan->tre_ring; - - buf_info = buf_ring->wp; - WARN_ON(buf_info->used); - buf_info->pre_mapped = info->pre_mapped; - if (info->pre_mapped) - buf_info->p_addr = info->p_addr; - else - buf_info->v_addr = info->v_addr; - buf_info->cb_buf = info->cb_buf; - buf_info->wp = tre_ring->wp; - buf_info->dir = mhi_chan->dir; - buf_info->len = info->len; - - if (!info->pre_mapped) { - ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); - if (ret) - return ret; - } - - eob = !!(flags & MHI_EOB); - eot = !!(flags & MHI_EOT); - chain = !!(flags & MHI_CHAIN); - bei = !!(mhi_chan->intmod); - - mhi_tre = tre_ring->wp; - mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); - mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len); - mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); - - /* increment WP */ - mhi_add_ring_element(mhi_cntrl, tre_ring); - mhi_add_ring_element(mhi_cntrl, buf_ring); - - return 0; -} - -int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, - void *buf, size_t len, enum mhi_flags mflags) -{ - struct mhi_buf_info buf_info = { }; - - buf_info.v_addr = buf; - buf_info.cb_buf = buf; - buf_info.len = len; - - return mhi_queue(mhi_dev, &buf_info, dir, mflags); -} -EXPORT_SYMBOL_GPL(mhi_queue_buf); - -bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir) -{ - struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
- mhi_dev->ul_chan : mhi_dev->dl_chan; - struct mhi_ring *tre_ring = &mhi_chan->tre_ring; - - return mhi_is_ring_full(mhi_cntrl, tre_ring); -} -EXPORT_SYMBOL_GPL(mhi_queue_is_full); - -int mhi_send_cmd(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan, - enum mhi_cmd_type cmd) -{ - struct mhi_tre *cmd_tre = NULL; - struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; - struct mhi_ring *ring = &mhi_cmd->ring; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - int chan = 0; - - if (mhi_chan) - chan = mhi_chan->chan; - - spin_lock_bh(&mhi_cmd->lock); - if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { - spin_unlock_bh(&mhi_cmd->lock); - return -ENOMEM; - } - - /* prepare the cmd tre */ - cmd_tre = ring->wp; - switch (cmd) { - case MHI_CMD_RESET_CHAN: - cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; - cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; - cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); - break; - case MHI_CMD_STOP_CHAN: - cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR; - cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0; - cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan); - break; - case MHI_CMD_START_CHAN: - cmd_tre->ptr = MHI_TRE_CMD_START_PTR; - cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; - cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); - break; - default: - dev_err(dev, "Command not supported\n"); - break; - } - - /* queue to hardware */ - mhi_add_ring_element(mhi_cntrl, ring); - read_lock_bh(&mhi_cntrl->pm_lock); - if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) - mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); - read_unlock_bh(&mhi_cntrl->pm_lock); - spin_unlock_bh(&mhi_cmd->lock); - - return 0; -} - -static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan, - enum mhi_ch_state_type to_state) -{ - struct device *dev = &mhi_chan->mhi_dev->dev; - enum mhi_cmd_type cmd = MHI_CMD_NOP; - int ret; - - dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan, - TO_CH_STATE_TYPE_STR(to_state)); - - switch (to_state) { - case MHI_CH_STATE_TYPE_RESET: - write_lock_irq(&mhi_chan->lock); - if (mhi_chan->ch_state != MHI_CH_STATE_STOP && - mhi_chan->ch_state != MHI_CH_STATE_ENABLED && - mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) { - write_unlock_irq(&mhi_chan->lock); - return -EINVAL; - } - mhi_chan->ch_state = MHI_CH_STATE_DISABLED; - write_unlock_irq(&mhi_chan->lock); - - cmd = MHI_CMD_RESET_CHAN; - break; - case MHI_CH_STATE_TYPE_STOP: - if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) - return -EINVAL; - - cmd = MHI_CMD_STOP_CHAN; - break; - case MHI_CH_STATE_TYPE_START: - if (mhi_chan->ch_state != MHI_CH_STATE_STOP && - mhi_chan->ch_state != MHI_CH_STATE_DISABLED) - return -EINVAL; - - cmd = MHI_CMD_START_CHAN; - break; - default: - dev_err(dev, "%d: Channel state update to %s not allowed\n", - mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); - return -EINVAL; - } - - /* bring host and device out of suspended states */ - ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); - if (ret) - return ret; - mhi_cntrl->runtime_get(mhi_cntrl); - - reinit_completion(&mhi_chan->completion); - ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); - if (ret) { - dev_err(dev, "%d: Failed to send %s channel command\n", - mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); - goto exit_channel_update; - } - - ret = wait_for_completion_timeout(&mhi_chan->completion, - msecs_to_jiffies(mhi_cntrl->timeout_ms)); - if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { - dev_err(dev, - "%d: Failed to receive %s channel command completion\n", - mhi_chan->chan, 
TO_CH_STATE_TYPE_STR(to_state)); - ret = -EIO; - goto exit_channel_update; - } - - ret = 0; - - if (to_state != MHI_CH_STATE_TYPE_RESET) { - write_lock_irq(&mhi_chan->lock); - mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ? - MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP; - write_unlock_irq(&mhi_chan->lock); - } - - dev_dbg(dev, "%d: Channel state change to %s successful\n", - mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); - -exit_channel_update: - mhi_cntrl->runtime_put(mhi_cntrl); - mhi_device_put(mhi_cntrl->mhi_dev); - - return ret; -} - -static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan) -{ - int ret; - struct device *dev = &mhi_chan->mhi_dev->dev; - - mutex_lock(&mhi_chan->mutex); - - if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { - dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n", - TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); - goto exit_unprepare_channel; - } - - /* no more processing events for this channel */ - ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, - MHI_CH_STATE_TYPE_RESET); - if (ret) - dev_err(dev, "%d: Failed to reset channel, still resetting\n", - mhi_chan->chan); - -exit_unprepare_channel: - write_lock_irq(&mhi_chan->lock); - mhi_chan->ch_state = MHI_CH_STATE_DISABLED; - write_unlock_irq(&mhi_chan->lock); - - if (!mhi_chan->offload_ch) { - mhi_reset_chan(mhi_cntrl, mhi_chan); - mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); - } - dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan); - - mutex_unlock(&mhi_chan->mutex); -} - -int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan, unsigned int flags) -{ - int ret = 0; - struct device *dev = &mhi_chan->mhi_dev->dev; - - if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { - dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n", - TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); - return -ENOTCONN; - } - - mutex_lock(&mhi_chan->mutex); - - /* Check of client manages channel context for offload channels */ - if (!mhi_chan->offload_ch) { - ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); - if (ret) - goto error_init_chan; - } - - ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, - MHI_CH_STATE_TYPE_START); - if (ret) - goto error_pm_state; - - if (mhi_chan->dir == DMA_FROM_DEVICE) - mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS); - - /* Pre-allocate buffer for xfer ring */ - if (mhi_chan->pre_alloc) { - int nr_el = get_nr_avail_ring_elements(mhi_cntrl, - &mhi_chan->tre_ring); - size_t len = mhi_cntrl->buffer_len; - - while (nr_el--) { - void *buf; - struct mhi_buf_info info = { }; - - buf = kmalloc(len, GFP_KERNEL); - if (!buf) { - ret = -ENOMEM; - goto error_pre_alloc; - } - - /* Prepare transfer descriptors */ - info.v_addr = buf; - info.cb_buf = buf; - info.len = len; - ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT); - if (ret) { - kfree(buf); - goto error_pre_alloc; - } - } - - read_lock_bh(&mhi_cntrl->pm_lock); - if (MHI_DB_ACCESS_VALID(mhi_cntrl)) { - read_lock_irq(&mhi_chan->lock); - mhi_ring_chan_db(mhi_cntrl, mhi_chan); - read_unlock_irq(&mhi_chan->lock); - } - read_unlock_bh(&mhi_cntrl->pm_lock); - } - - mutex_unlock(&mhi_chan->mutex); - - return 0; - -error_pm_state: - if (!mhi_chan->offload_ch) - mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); - -error_init_chan: - mutex_unlock(&mhi_chan->mutex); - - return ret; - -error_pre_alloc: - mutex_unlock(&mhi_chan->mutex); - mhi_unprepare_channel(mhi_cntrl, mhi_chan); - - return ret; -} - -static void mhi_mark_stale_events(struct mhi_controller 
*mhi_cntrl, - struct mhi_event *mhi_event, - struct mhi_event_ctxt *er_ctxt, - int chan) - -{ - struct mhi_tre *dev_rp, *local_rp; - struct mhi_ring *ev_ring; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - unsigned long flags; - dma_addr_t ptr; - - dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan); - - ev_ring = &mhi_event->ring; - - /* mark all stale events related to channel as STALE event */ - spin_lock_irqsave(&mhi_event->lock, flags); - - ptr = le64_to_cpu(er_ctxt->rp); - if (!is_valid_ring_ptr(ev_ring, ptr)) { - dev_err(&mhi_cntrl->mhi_dev->dev, - "Event ring rp points outside of the event ring\n"); - dev_rp = ev_ring->rp; - } else { - dev_rp = mhi_to_virtual(ev_ring, ptr); - } - - local_rp = ev_ring->rp; - while (dev_rp != local_rp) { - if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT && - chan == MHI_TRE_GET_EV_CHID(local_rp)) - local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, - MHI_PKT_TYPE_STALE_EVENT); - local_rp++; - if (local_rp == (ev_ring->base + ev_ring->len)) - local_rp = ev_ring->base; - } - - dev_dbg(dev, "Finished marking events as stale events\n"); - spin_unlock_irqrestore(&mhi_event->lock, flags); -} - -static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan) -{ - struct mhi_ring *buf_ring, *tre_ring; - struct mhi_result result; - - /* Reset any pending buffers */ - buf_ring = &mhi_chan->buf_ring; - tre_ring = &mhi_chan->tre_ring; - result.transaction_status = -ENOTCONN; - result.bytes_xferd = 0; - while (tre_ring->rp != tre_ring->wp) { - struct mhi_buf_info *buf_info = buf_ring->rp; - - if (mhi_chan->dir == DMA_TO_DEVICE) { - atomic_dec(&mhi_cntrl->pending_pkts); - /* Release the reference got from mhi_queue() */ - mhi_cntrl->runtime_put(mhi_cntrl); - } - - if (!buf_info->pre_mapped) - mhi_cntrl->unmap_single(mhi_cntrl, buf_info); - - mhi_del_ring_element(mhi_cntrl, buf_ring); - mhi_del_ring_element(mhi_cntrl, tre_ring); - - if (mhi_chan->pre_alloc) { - kfree(buf_info->cb_buf); - } else { - result.buf_addr = buf_info->cb_buf; - mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); - } - } -} - -void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) -{ - struct mhi_event *mhi_event; - struct mhi_event_ctxt *er_ctxt; - int chan = mhi_chan->chan; - - /* Nothing to reset, client doesn't queue buffers */ - if (mhi_chan->offload_ch) - return; - - read_lock_bh(&mhi_cntrl->pm_lock); - mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; - er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; - - mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan); - - mhi_reset_data_chan(mhi_cntrl, mhi_chan); - - read_unlock_bh(&mhi_cntrl->pm_lock); -} - -static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags) -{ - int ret, dir; - struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - struct mhi_chan *mhi_chan; - - for (dir = 0; dir < 2; dir++) { - mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; - if (!mhi_chan) - continue; - - ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags); - if (ret) - goto error_open_chan; - } - - return 0; - -error_open_chan: - for (--dir; dir >= 0; dir--) { - mhi_chan = dir ? 
mhi_dev->dl_chan : mhi_dev->ul_chan; - if (!mhi_chan) - continue; - - mhi_unprepare_channel(mhi_cntrl, mhi_chan); - } - - return ret; -} - -int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) -{ - return __mhi_prepare_for_transfer(mhi_dev, 0); -} -EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer); - -int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev) -{ - return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS); -} -EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue); - -void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) -{ - struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - struct mhi_chan *mhi_chan; - int dir; - - for (dir = 0; dir < 2; dir++) { - mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; - if (!mhi_chan) - continue; - - mhi_unprepare_channel(mhi_cntrl, mhi_chan); - } -} -EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer); - -int mhi_poll(struct mhi_device *mhi_dev, u32 budget) -{ - struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - struct mhi_chan *mhi_chan = mhi_dev->dl_chan; - struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; - int ret; - - spin_lock_bh(&mhi_event->lock); - ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); - spin_unlock_bh(&mhi_event->lock); - - return ret; -} -EXPORT_SYMBOL_GPL(mhi_poll); diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c deleted file mode 100644 index c35c5ddc7220..000000000000 --- a/drivers/bus/mhi/core/pm.c +++ /dev/null @@ -1,1260 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "internal.h" - -/* - * Not all MHI state transitions are synchronous. Transitions like Linkdown, - * SYS_ERR, and shutdown can happen anytime asynchronously. This function will - * transition to a new state only if we're allowed to. - * - * Priority increases as we go down. For instance, from any state in L0, the - * transition can be made to states in L1, L2 and L3. A notable exception to - * this rule is state DISABLE. From DISABLE state we can only transition to - * POR state. Also, while in L2 state, user cannot jump back to previous - * L1 or L0 states. 
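The level hierarchy described in this comment is implemented just below as a lookup table keyed on the current state together with a bitmask of the states reachable from it (see dev_state_transitions[] and mhi_tryset_pm_state()). A minimal standalone sketch of that pattern, using hypothetical names rather than the in-tree symbols:

	/* Simplified illustration of a masked state-transition table. */
	#include <stddef.h>
	#include <stdint.h>

	enum pm_state {
		PM_DISABLE  = 1 << 0,	/* L0 */
		PM_POR      = 1 << 1,	/* L0 */
		PM_M0       = 1 << 2,	/* L0 */
		PM_SYS_ERR  = 1 << 3,	/* L1 */
		PM_SHUTDOWN = 1 << 4,	/* L2 */
	};

	struct pm_transition {
		uint32_t from_state;
		uint32_t to_states;	/* bitmask of allowed next states */
	};

	static const struct pm_transition transitions[] = {
		{ PM_DISABLE,  PM_POR },
		{ PM_POR,      PM_POR | PM_DISABLE | PM_M0 | PM_SYS_ERR | PM_SHUTDOWN },
		{ PM_M0,       PM_M0 | PM_SYS_ERR | PM_SHUTDOWN },
		{ PM_SYS_ERR,  PM_SHUTDOWN },
		{ PM_SHUTDOWN, PM_DISABLE },
	};

	/* Return the new state if the move is allowed, else the unchanged state. */
	static uint32_t try_set_state(uint32_t cur, uint32_t next)
	{
		size_t i;

		for (i = 0; i < sizeof(transitions) / sizeof(transitions[0]); i++)
			if (transitions[i].from_state == cur)
				return (transitions[i].to_states & next) ? next : cur;

		return cur;
	}
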
- * - * Valid transitions: - * L0: DISABLE <--> POR - * POR <--> POR - * POR -> M0 -> M2 --> M0 - * POR -> FW_DL_ERR - * FW_DL_ERR <--> FW_DL_ERR - * M0 <--> M0 - * M0 -> FW_DL_ERR - * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 - * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR - * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT - * SHUTDOWN_PROCESS -> DISABLE - * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT - * LD_ERR_FATAL_DETECT -> DISABLE - */ -static const struct mhi_pm_transitions dev_state_transitions[] = { - /* L0 States */ - { - MHI_PM_DISABLE, - MHI_PM_POR - }, - { - MHI_PM_POR, - MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 | - MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | - MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR - }, - { - MHI_PM_M0, - MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER | - MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | - MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR - }, - { - MHI_PM_M2, - MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | - MHI_PM_LD_ERR_FATAL_DETECT - }, - { - MHI_PM_M3_ENTER, - MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | - MHI_PM_LD_ERR_FATAL_DETECT - }, - { - MHI_PM_M3, - MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT | - MHI_PM_LD_ERR_FATAL_DETECT - }, - { - MHI_PM_M3_EXIT, - MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | - MHI_PM_LD_ERR_FATAL_DETECT - }, - { - MHI_PM_FW_DL_ERR, - MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT | - MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT - }, - /* L1 States */ - { - MHI_PM_SYS_ERR_DETECT, - MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS | - MHI_PM_LD_ERR_FATAL_DETECT - }, - { - MHI_PM_SYS_ERR_PROCESS, - MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS | - MHI_PM_LD_ERR_FATAL_DETECT - }, - /* L2 States */ - { - MHI_PM_SHUTDOWN_PROCESS, - MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT - }, - /* L3 States */ - { - MHI_PM_LD_ERR_FATAL_DETECT, - MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE - }, -}; - -enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl, - enum mhi_pm_state state) -{ - unsigned long cur_state = mhi_cntrl->pm_state; - int index = find_last_bit(&cur_state, 32); - - if (unlikely(index >= ARRAY_SIZE(dev_state_transitions))) - return cur_state; - - if (unlikely(dev_state_transitions[index].from_state != cur_state)) - return cur_state; - - if (unlikely(!(dev_state_transitions[index].to_states & state))) - return cur_state; - - mhi_cntrl->pm_state = state; - return mhi_cntrl->pm_state; -} - -void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state) -{ - if (state == MHI_STATE_RESET) { - mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1); - } else { - mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_MHISTATE_MASK, - MHICTRL_MHISTATE_SHIFT, state); - } -} - -/* NOP for backward compatibility, host allowed to ring DB in M2 state */ -static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl) -{ -} - -static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl) -{ - mhi_cntrl->wake_get(mhi_cntrl, false); - mhi_cntrl->wake_put(mhi_cntrl, true); -} - -/* Handle device ready state transition */ -int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) -{ - struct mhi_event *mhi_event; - enum mhi_pm_state cur_state; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - u32 interval_us = 25000; /* poll register field every 25 milliseconds */ - int ret, i; - - /* Check if device entered error state */ - if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) 
{ - dev_err(dev, "Device link is not accessible\n"); - return -EIO; - } - - /* Wait for RESET to be cleared and READY bit to be set by the device */ - ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, - interval_us); - if (ret) { - dev_err(dev, "Device failed to clear MHI Reset\n"); - return ret; - } - - ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, - MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT, 1, - interval_us); - if (ret) { - dev_err(dev, "Device failed to enter MHI Ready\n"); - return ret; - } - - dev_dbg(dev, "Device in READY State\n"); - write_lock_irq(&mhi_cntrl->pm_lock); - cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); - mhi_cntrl->dev_state = MHI_STATE_READY; - write_unlock_irq(&mhi_cntrl->pm_lock); - - if (cur_state != MHI_PM_POR) { - dev_err(dev, "Error moving to state %s from %s\n", - to_mhi_pm_state_str(MHI_PM_POR), - to_mhi_pm_state_str(cur_state)); - return -EIO; - } - - read_lock_bh(&mhi_cntrl->pm_lock); - if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { - dev_err(dev, "Device registers not accessible\n"); - goto error_mmio; - } - - /* Configure MMIO registers */ - ret = mhi_init_mmio(mhi_cntrl); - if (ret) { - dev_err(dev, "Error configuring MMIO registers\n"); - goto error_mmio; - } - - /* Add elements to all SW event rings */ - mhi_event = mhi_cntrl->mhi_event; - for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { - struct mhi_ring *ring = &mhi_event->ring; - - /* Skip if this is an offload or HW event */ - if (mhi_event->offload_ev || mhi_event->hw_ring) - continue; - - ring->wp = ring->base + ring->len - ring->el_size; - *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); - /* Update all cores */ - smp_wmb(); - - /* Ring the event ring db */ - spin_lock_irq(&mhi_event->lock); - mhi_ring_er_db(mhi_event); - spin_unlock_irq(&mhi_event->lock); - } - - /* Set MHI to M0 state */ - mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); - read_unlock_bh(&mhi_cntrl->pm_lock); - - return 0; - -error_mmio: - read_unlock_bh(&mhi_cntrl->pm_lock); - - return -EIO; -} - -int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) -{ - enum mhi_pm_state cur_state; - struct mhi_chan *mhi_chan; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - int i; - - write_lock_irq(&mhi_cntrl->pm_lock); - mhi_cntrl->dev_state = MHI_STATE_M0; - cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0); - write_unlock_irq(&mhi_cntrl->pm_lock); - if (unlikely(cur_state != MHI_PM_M0)) { - dev_err(dev, "Unable to transition to M0 state\n"); - return -EIO; - } - mhi_cntrl->M0++; - - /* Wake up the device */ - read_lock_bh(&mhi_cntrl->pm_lock); - mhi_cntrl->wake_get(mhi_cntrl, true); - - /* Ring all event rings and CMD ring only if we're in mission mode */ - if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) { - struct mhi_event *mhi_event = mhi_cntrl->mhi_event; - struct mhi_cmd *mhi_cmd = - &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; - - for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { - if (mhi_event->offload_ev) - continue; - - spin_lock_irq(&mhi_event->lock); - mhi_ring_er_db(mhi_event); - spin_unlock_irq(&mhi_event->lock); - } - - /* Only ring primary cmd ring if ring is not empty */ - spin_lock_irq(&mhi_cmd->lock); - if (mhi_cmd->ring.rp != mhi_cmd->ring.wp) - mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); - spin_unlock_irq(&mhi_cmd->lock); - } - - /* Ring channel DB registers */ - mhi_chan = mhi_cntrl->mhi_chan; - for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { - struct mhi_ring *tre_ring = 
&mhi_chan->tre_ring; - - if (mhi_chan->db_cfg.reset_req) { - write_lock_irq(&mhi_chan->lock); - mhi_chan->db_cfg.db_mode = true; - write_unlock_irq(&mhi_chan->lock); - } - - read_lock_irq(&mhi_chan->lock); - - /* Only ring DB if ring is not empty */ - if (tre_ring->base && tre_ring->wp != tre_ring->rp) - mhi_ring_chan_db(mhi_cntrl, mhi_chan); - read_unlock_irq(&mhi_chan->lock); - } - - mhi_cntrl->wake_put(mhi_cntrl, false); - read_unlock_bh(&mhi_cntrl->pm_lock); - wake_up_all(&mhi_cntrl->state_event); - - return 0; -} - -/* - * After receiving the MHI state change event from the device indicating the - * transition to M1 state, the host can transition the device to M2 state - * for keeping it in low power state. - */ -void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl) -{ - enum mhi_pm_state state; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - - write_lock_irq(&mhi_cntrl->pm_lock); - state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2); - if (state == MHI_PM_M2) { - mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2); - mhi_cntrl->dev_state = MHI_STATE_M2; - - write_unlock_irq(&mhi_cntrl->pm_lock); - - mhi_cntrl->M2++; - wake_up_all(&mhi_cntrl->state_event); - - /* If there are any pending resources, exit M2 immediately */ - if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) || - atomic_read(&mhi_cntrl->dev_wake))) { - dev_dbg(dev, - "Exiting M2, pending_pkts: %d dev_wake: %d\n", - atomic_read(&mhi_cntrl->pending_pkts), - atomic_read(&mhi_cntrl->dev_wake)); - read_lock_bh(&mhi_cntrl->pm_lock); - mhi_cntrl->wake_get(mhi_cntrl, true); - mhi_cntrl->wake_put(mhi_cntrl, true); - read_unlock_bh(&mhi_cntrl->pm_lock); - } else { - mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE); - } - } else { - write_unlock_irq(&mhi_cntrl->pm_lock); - } -} - -/* MHI M3 completion handler */ -int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl) -{ - enum mhi_pm_state state; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - - write_lock_irq(&mhi_cntrl->pm_lock); - mhi_cntrl->dev_state = MHI_STATE_M3; - state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3); - write_unlock_irq(&mhi_cntrl->pm_lock); - if (state != MHI_PM_M3) { - dev_err(dev, "Unable to transition to M3 state\n"); - return -EIO; - } - - mhi_cntrl->M3++; - wake_up_all(&mhi_cntrl->state_event); - - return 0; -} - -/* Handle device Mission Mode transition */ -static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) -{ - struct mhi_event *mhi_event; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee; - int i, ret; - - dev_dbg(dev, "Processing Mission Mode transition\n"); - - write_lock_irq(&mhi_cntrl->pm_lock); - if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) - ee = mhi_get_exec_env(mhi_cntrl); - - if (!MHI_IN_MISSION_MODE(ee)) { - mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; - write_unlock_irq(&mhi_cntrl->pm_lock); - wake_up_all(&mhi_cntrl->state_event); - return -EIO; - } - mhi_cntrl->ee = ee; - write_unlock_irq(&mhi_cntrl->pm_lock); - - wake_up_all(&mhi_cntrl->state_event); - - device_for_each_child(&mhi_cntrl->mhi_dev->dev, ¤t_ee, - mhi_destroy_device); - mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE); - - /* Force MHI to be in M0 state before continuing */ - ret = __mhi_device_get_sync(mhi_cntrl); - if (ret) - return ret; - - read_lock_bh(&mhi_cntrl->pm_lock); - - if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { - ret = -EIO; - goto error_mission_mode; - } - - /* Add elements to all HW event rings */ - mhi_event = mhi_cntrl->mhi_event; - for (i = 0; i < 
mhi_cntrl->total_ev_rings; i++, mhi_event++) { - struct mhi_ring *ring = &mhi_event->ring; - - if (mhi_event->offload_ev || !mhi_event->hw_ring) - continue; - - ring->wp = ring->base + ring->len - ring->el_size; - *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); - /* Update to all cores */ - smp_wmb(); - - spin_lock_irq(&mhi_event->lock); - if (MHI_DB_ACCESS_VALID(mhi_cntrl)) - mhi_ring_er_db(mhi_event); - spin_unlock_irq(&mhi_event->lock); - } - - read_unlock_bh(&mhi_cntrl->pm_lock); - - /* - * The MHI devices are only created when the client device switches its - * Execution Environment (EE) to either SBL or AMSS states - */ - mhi_create_devices(mhi_cntrl); - - read_lock_bh(&mhi_cntrl->pm_lock); - -error_mission_mode: - mhi_cntrl->wake_put(mhi_cntrl, false); - read_unlock_bh(&mhi_cntrl->pm_lock); - - return ret; -} - -/* Handle shutdown transitions */ -static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) -{ - enum mhi_pm_state cur_state; - struct mhi_event *mhi_event; - struct mhi_cmd_ctxt *cmd_ctxt; - struct mhi_cmd *mhi_cmd; - struct mhi_event_ctxt *er_ctxt; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - int ret, i; - - dev_dbg(dev, "Processing disable transition with PM state: %s\n", - to_mhi_pm_state_str(mhi_cntrl->pm_state)); - - mutex_lock(&mhi_cntrl->pm_mutex); - - /* Trigger MHI RESET so that the device will not access host memory */ - if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { - dev_dbg(dev, "Triggering MHI Reset in device\n"); - mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); - - /* Wait for the reset bit to be cleared by the device */ - ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, - 25000); - if (ret) - dev_err(dev, "Device failed to clear MHI Reset\n"); - - /* - * Device will clear BHI_INTVEC as a part of RESET processing, - * hence re-program it - */ - mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); - } - - dev_dbg(dev, - "Waiting for all pending event ring processing to complete\n"); - mhi_event = mhi_cntrl->mhi_event; - for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { - if (mhi_event->offload_ev) - continue; - free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); - tasklet_kill(&mhi_event->task); - } - - /* Release lock and wait for all pending threads to complete */ - mutex_unlock(&mhi_cntrl->pm_mutex); - dev_dbg(dev, "Waiting for all pending threads to complete\n"); - wake_up_all(&mhi_cntrl->state_event); - - dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); - device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); - - mutex_lock(&mhi_cntrl->pm_mutex); - - WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); - WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); - - /* Reset the ev rings and cmd rings */ - dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n"); - mhi_cmd = mhi_cntrl->mhi_cmd; - cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; - for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { - struct mhi_ring *ring = &mhi_cmd->ring; - - ring->rp = ring->base; - ring->wp = ring->base; - cmd_ctxt->rp = cmd_ctxt->rbase; - cmd_ctxt->wp = cmd_ctxt->rbase; - } - - mhi_event = mhi_cntrl->mhi_event; - er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; - for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, - mhi_event++) { - struct mhi_ring *ring = &mhi_event->ring; - - /* Skip offload events */ - if (mhi_event->offload_ev) - continue; - - ring->rp = ring->base; - ring->wp = ring->base; - er_ctxt->rp = er_ctxt->rbase; - 
er_ctxt->wp = er_ctxt->rbase; - } - - /* Move to disable state */ - write_lock_irq(&mhi_cntrl->pm_lock); - cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE); - write_unlock_irq(&mhi_cntrl->pm_lock); - if (unlikely(cur_state != MHI_PM_DISABLE)) - dev_err(dev, "Error moving from PM state: %s to: %s\n", - to_mhi_pm_state_str(cur_state), - to_mhi_pm_state_str(MHI_PM_DISABLE)); - - dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", - to_mhi_pm_state_str(mhi_cntrl->pm_state), - TO_MHI_STATE_STR(mhi_cntrl->dev_state)); - - mutex_unlock(&mhi_cntrl->pm_mutex); -} - -/* Handle system error transitions */ -static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) -{ - enum mhi_pm_state cur_state, prev_state; - enum dev_st_transition next_state; - struct mhi_event *mhi_event; - struct mhi_cmd_ctxt *cmd_ctxt; - struct mhi_cmd *mhi_cmd; - struct mhi_event_ctxt *er_ctxt; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - int ret, i; - - dev_dbg(dev, "Transitioning from PM state: %s to: %s\n", - to_mhi_pm_state_str(mhi_cntrl->pm_state), - to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS)); - - /* We must notify MHI control driver so it can clean up first */ - mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR); - - mutex_lock(&mhi_cntrl->pm_mutex); - write_lock_irq(&mhi_cntrl->pm_lock); - prev_state = mhi_cntrl->pm_state; - cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); - write_unlock_irq(&mhi_cntrl->pm_lock); - - if (cur_state != MHI_PM_SYS_ERR_PROCESS) { - dev_err(dev, "Failed to transition from PM state: %s to: %s\n", - to_mhi_pm_state_str(cur_state), - to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS)); - goto exit_sys_error_transition; - } - - mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; - mhi_cntrl->dev_state = MHI_STATE_RESET; - - /* Wake up threads waiting for state transition */ - wake_up_all(&mhi_cntrl->state_event); - - /* Trigger MHI RESET so that the device will not access host memory */ - if (MHI_REG_ACCESS_VALID(prev_state)) { - u32 in_reset = -1; - unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); - - dev_dbg(dev, "Triggering MHI Reset in device\n"); - mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); - - /* Wait for the reset bit to be cleared by the device */ - ret = wait_event_timeout(mhi_cntrl->state_event, - mhi_read_reg_field(mhi_cntrl, - mhi_cntrl->regs, - MHICTRL, - MHICTRL_RESET_MASK, - MHICTRL_RESET_SHIFT, - &in_reset) || - !in_reset, timeout); - if (!ret || in_reset) { - dev_err(dev, "Device failed to exit MHI Reset state\n"); - goto exit_sys_error_transition; - } - - /* - * Device will clear BHI_INTVEC as a part of RESET processing, - * hence re-program it - */ - mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); - } - - dev_dbg(dev, - "Waiting for all pending event ring processing to complete\n"); - mhi_event = mhi_cntrl->mhi_event; - for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { - if (mhi_event->offload_ev) - continue; - tasklet_kill(&mhi_event->task); - } - - /* Release lock and wait for all pending threads to complete */ - mutex_unlock(&mhi_cntrl->pm_mutex); - dev_dbg(dev, "Waiting for all pending threads to complete\n"); - wake_up_all(&mhi_cntrl->state_event); - - dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); - device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); - - mutex_lock(&mhi_cntrl->pm_mutex); - - WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); - WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); - - /* Reset the ev rings and cmd rings */ - dev_dbg(dev, 
"Resetting EV CTXT and CMD CTXT\n"); - mhi_cmd = mhi_cntrl->mhi_cmd; - cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; - for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { - struct mhi_ring *ring = &mhi_cmd->ring; - - ring->rp = ring->base; - ring->wp = ring->base; - cmd_ctxt->rp = cmd_ctxt->rbase; - cmd_ctxt->wp = cmd_ctxt->rbase; - } - - mhi_event = mhi_cntrl->mhi_event; - er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; - for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, - mhi_event++) { - struct mhi_ring *ring = &mhi_event->ring; - - /* Skip offload events */ - if (mhi_event->offload_ev) - continue; - - ring->rp = ring->base; - ring->wp = ring->base; - er_ctxt->rp = er_ctxt->rbase; - er_ctxt->wp = er_ctxt->rbase; - } - - /* Transition to next state */ - if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) { - write_lock_irq(&mhi_cntrl->pm_lock); - cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); - write_unlock_irq(&mhi_cntrl->pm_lock); - if (cur_state != MHI_PM_POR) { - dev_err(dev, "Error moving to state %s from %s\n", - to_mhi_pm_state_str(MHI_PM_POR), - to_mhi_pm_state_str(cur_state)); - goto exit_sys_error_transition; - } - next_state = DEV_ST_TRANSITION_PBL; - } else { - next_state = DEV_ST_TRANSITION_READY; - } - - mhi_queue_state_transition(mhi_cntrl, next_state); - -exit_sys_error_transition: - dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", - to_mhi_pm_state_str(mhi_cntrl->pm_state), - TO_MHI_STATE_STR(mhi_cntrl->dev_state)); - - mutex_unlock(&mhi_cntrl->pm_mutex); -} - -/* Queue a new work item and schedule work */ -int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, - enum dev_st_transition state) -{ - struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC); - unsigned long flags; - - if (!item) - return -ENOMEM; - - item->state = state; - spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); - list_add_tail(&item->node, &mhi_cntrl->transition_list); - spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); - - queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker); - - return 0; -} - -/* SYS_ERR worker */ -void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl) -{ - struct device *dev = &mhi_cntrl->mhi_dev->dev; - - /* skip if controller supports RDDM */ - if (mhi_cntrl->rddm_image) { - dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n"); - return; - } - - mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR); -} - -/* Device State Transition worker */ -void mhi_pm_st_worker(struct work_struct *work) -{ - struct state_transition *itr, *tmp; - LIST_HEAD(head); - struct mhi_controller *mhi_cntrl = container_of(work, - struct mhi_controller, - st_worker); - struct device *dev = &mhi_cntrl->mhi_dev->dev; - - spin_lock_irq(&mhi_cntrl->transition_lock); - list_splice_tail_init(&mhi_cntrl->transition_list, &head); - spin_unlock_irq(&mhi_cntrl->transition_lock); - - list_for_each_entry_safe(itr, tmp, &head, node) { - list_del(&itr->node); - dev_dbg(dev, "Handling state transition: %s\n", - TO_DEV_STATE_TRANS_STR(itr->state)); - - switch (itr->state) { - case DEV_ST_TRANSITION_PBL: - write_lock_irq(&mhi_cntrl->pm_lock); - if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) - mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); - write_unlock_irq(&mhi_cntrl->pm_lock); - mhi_fw_load_handler(mhi_cntrl); - break; - case DEV_ST_TRANSITION_SBL: - write_lock_irq(&mhi_cntrl->pm_lock); - mhi_cntrl->ee = MHI_EE_SBL; - write_unlock_irq(&mhi_cntrl->pm_lock); - /* - * The MHI devices are only created when the client - * device switches its 
Execution Environment (EE) to - * either SBL or AMSS states - */ - mhi_create_devices(mhi_cntrl); - if (mhi_cntrl->fbc_download) - mhi_download_amss_image(mhi_cntrl); - break; - case DEV_ST_TRANSITION_MISSION_MODE: - mhi_pm_mission_mode_transition(mhi_cntrl); - break; - case DEV_ST_TRANSITION_FP: - write_lock_irq(&mhi_cntrl->pm_lock); - mhi_cntrl->ee = MHI_EE_FP; - write_unlock_irq(&mhi_cntrl->pm_lock); - mhi_create_devices(mhi_cntrl); - break; - case DEV_ST_TRANSITION_READY: - mhi_ready_state_transition(mhi_cntrl); - break; - case DEV_ST_TRANSITION_SYS_ERR: - mhi_pm_sys_error_transition(mhi_cntrl); - break; - case DEV_ST_TRANSITION_DISABLE: - mhi_pm_disable_transition(mhi_cntrl); - break; - default: - break; - } - kfree(itr); - } -} - -int mhi_pm_suspend(struct mhi_controller *mhi_cntrl) -{ - struct mhi_chan *itr, *tmp; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - enum mhi_pm_state new_state; - int ret; - - if (mhi_cntrl->pm_state == MHI_PM_DISABLE) - return -EINVAL; - - if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) - return -EIO; - - /* Return busy if there are any pending resources */ - if (atomic_read(&mhi_cntrl->dev_wake) || - atomic_read(&mhi_cntrl->pending_pkts)) - return -EBUSY; - - /* Take MHI out of M2 state */ - read_lock_bh(&mhi_cntrl->pm_lock); - mhi_cntrl->wake_get(mhi_cntrl, false); - read_unlock_bh(&mhi_cntrl->pm_lock); - - ret = wait_event_timeout(mhi_cntrl->state_event, - mhi_cntrl->dev_state == MHI_STATE_M0 || - mhi_cntrl->dev_state == MHI_STATE_M1 || - MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), - msecs_to_jiffies(mhi_cntrl->timeout_ms)); - - read_lock_bh(&mhi_cntrl->pm_lock); - mhi_cntrl->wake_put(mhi_cntrl, false); - read_unlock_bh(&mhi_cntrl->pm_lock); - - if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { - dev_err(dev, - "Could not enter M0/M1 state"); - return -EIO; - } - - write_lock_irq(&mhi_cntrl->pm_lock); - - if (atomic_read(&mhi_cntrl->dev_wake) || - atomic_read(&mhi_cntrl->pending_pkts)) { - write_unlock_irq(&mhi_cntrl->pm_lock); - return -EBUSY; - } - - dev_dbg(dev, "Allowing M3 transition\n"); - new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER); - if (new_state != MHI_PM_M3_ENTER) { - write_unlock_irq(&mhi_cntrl->pm_lock); - dev_err(dev, - "Error setting to PM state: %s from: %s\n", - to_mhi_pm_state_str(MHI_PM_M3_ENTER), - to_mhi_pm_state_str(mhi_cntrl->pm_state)); - return -EIO; - } - - /* Set MHI to M3 and wait for completion */ - mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3); - write_unlock_irq(&mhi_cntrl->pm_lock); - dev_dbg(dev, "Waiting for M3 completion\n"); - - ret = wait_event_timeout(mhi_cntrl->state_event, - mhi_cntrl->dev_state == MHI_STATE_M3 || - MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), - msecs_to_jiffies(mhi_cntrl->timeout_ms)); - - if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { - dev_err(dev, - "Did not enter M3 state, MHI state: %s, PM state: %s\n", - TO_MHI_STATE_STR(mhi_cntrl->dev_state), - to_mhi_pm_state_str(mhi_cntrl->pm_state)); - return -EIO; - } - - /* Notify clients about entering LPM */ - list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { - mutex_lock(&itr->mutex); - if (itr->mhi_dev) - mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); - mutex_unlock(&itr->mutex); - } - - return 0; -} -EXPORT_SYMBOL_GPL(mhi_pm_suspend); - -static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force) -{ - struct mhi_chan *itr, *tmp; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - enum mhi_pm_state cur_state; - int ret; - - dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n", - 
to_mhi_pm_state_str(mhi_cntrl->pm_state), - TO_MHI_STATE_STR(mhi_cntrl->dev_state)); - - if (mhi_cntrl->pm_state == MHI_PM_DISABLE) - return 0; - - if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) - return -EIO; - - if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) { - dev_warn(dev, "Resuming from non M3 state (%s)\n", - TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl))); - if (!force) - return -EINVAL; - } - - /* Notify clients about exiting LPM */ - list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { - mutex_lock(&itr->mutex); - if (itr->mhi_dev) - mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT); - mutex_unlock(&itr->mutex); - } - - write_lock_irq(&mhi_cntrl->pm_lock); - cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT); - if (cur_state != MHI_PM_M3_EXIT) { - write_unlock_irq(&mhi_cntrl->pm_lock); - dev_info(dev, - "Error setting to PM state: %s from: %s\n", - to_mhi_pm_state_str(MHI_PM_M3_EXIT), - to_mhi_pm_state_str(mhi_cntrl->pm_state)); - return -EIO; - } - - /* Set MHI to M0 and wait for completion */ - mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); - write_unlock_irq(&mhi_cntrl->pm_lock); - - ret = wait_event_timeout(mhi_cntrl->state_event, - mhi_cntrl->dev_state == MHI_STATE_M0 || - mhi_cntrl->dev_state == MHI_STATE_M2 || - MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), - msecs_to_jiffies(mhi_cntrl->timeout_ms)); - - if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { - dev_err(dev, - "Did not enter M0 state, MHI state: %s, PM state: %s\n", - TO_MHI_STATE_STR(mhi_cntrl->dev_state), - to_mhi_pm_state_str(mhi_cntrl->pm_state)); - return -EIO; - } - - return 0; -} - -int mhi_pm_resume(struct mhi_controller *mhi_cntrl) -{ - return __mhi_pm_resume(mhi_cntrl, false); -} -EXPORT_SYMBOL_GPL(mhi_pm_resume); - -int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl) -{ - return __mhi_pm_resume(mhi_cntrl, true); -} -EXPORT_SYMBOL_GPL(mhi_pm_resume_force); - -int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) -{ - int ret; - - /* Wake up the device */ - read_lock_bh(&mhi_cntrl->pm_lock); - if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { - read_unlock_bh(&mhi_cntrl->pm_lock); - return -EIO; - } - mhi_cntrl->wake_get(mhi_cntrl, true); - if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) - mhi_trigger_resume(mhi_cntrl); - read_unlock_bh(&mhi_cntrl->pm_lock); - - ret = wait_event_timeout(mhi_cntrl->state_event, - mhi_cntrl->pm_state == MHI_PM_M0 || - MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), - msecs_to_jiffies(mhi_cntrl->timeout_ms)); - - if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { - read_lock_bh(&mhi_cntrl->pm_lock); - mhi_cntrl->wake_put(mhi_cntrl, false); - read_unlock_bh(&mhi_cntrl->pm_lock); - return -EIO; - } - - return 0; -} - -/* Assert device wake db */ -static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force) -{ - unsigned long flags; - - /* - * If force flag is set, then increment the wake count value and - * ring wake db - */ - if (unlikely(force)) { - spin_lock_irqsave(&mhi_cntrl->wlock, flags); - atomic_inc(&mhi_cntrl->dev_wake); - if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) && - !mhi_cntrl->wake_set) { - mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); - mhi_cntrl->wake_set = true; - } - spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); - } else { - /* - * If resources are already requested, then just increment - * the wake count value and return - */ - if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) - return; - - spin_lock_irqsave(&mhi_cntrl->wlock, flags); - if 
((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) && - MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) && - !mhi_cntrl->wake_set) { - mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); - mhi_cntrl->wake_set = true; - } - spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); - } -} - -/* De-assert device wake db */ -static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, - bool override) -{ - unsigned long flags; - - /* - * Only continue if there is a single resource, else just decrement - * and return - */ - if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) - return; - - spin_lock_irqsave(&mhi_cntrl->wlock, flags); - if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) && - MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override && - mhi_cntrl->wake_set) { - mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0); - mhi_cntrl->wake_set = false; - } - spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); -} - -int mhi_async_power_up(struct mhi_controller *mhi_cntrl) -{ - enum mhi_state state; - enum mhi_ee_type current_ee; - enum dev_st_transition next_state; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - u32 interval_us = 25000; /* poll register field every 25 milliseconds */ - int ret; - - dev_info(dev, "Requested to power ON\n"); - - /* Supply default wake routines if not provided by controller driver */ - if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put || - !mhi_cntrl->wake_toggle) { - mhi_cntrl->wake_get = mhi_assert_dev_wake; - mhi_cntrl->wake_put = mhi_deassert_dev_wake; - mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ? - mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake; - } - - mutex_lock(&mhi_cntrl->pm_mutex); - mhi_cntrl->pm_state = MHI_PM_DISABLE; - - /* Setup BHI INTVEC */ - write_lock_irq(&mhi_cntrl->pm_lock); - mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); - mhi_cntrl->pm_state = MHI_PM_POR; - mhi_cntrl->ee = MHI_EE_MAX; - current_ee = mhi_get_exec_env(mhi_cntrl); - write_unlock_irq(&mhi_cntrl->pm_lock); - - /* Confirm that the device is in valid exec env */ - if (!MHI_POWER_UP_CAPABLE(current_ee)) { - dev_err(dev, "%s is not a valid EE for power on\n", - TO_MHI_EXEC_STR(current_ee)); - ret = -EIO; - goto error_exit; - } - - state = mhi_get_mhi_state(mhi_cntrl); - dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n", - TO_MHI_EXEC_STR(current_ee), TO_MHI_STATE_STR(state)); - - if (state == MHI_STATE_SYS_ERR) { - mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); - ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, - interval_us); - if (ret) { - dev_info(dev, "Failed to reset MHI due to syserr state\n"); - goto error_exit; - } - - /* - * device cleares INTVEC as part of RESET processing, - * re-program it - */ - mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); - } - - ret = mhi_init_irq_setup(mhi_cntrl); - if (ret) - goto error_exit; - - /* Transition to next state */ - next_state = MHI_IN_PBL(current_ee) ? 
- DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY; - - mhi_queue_state_transition(mhi_cntrl, next_state); - - mutex_unlock(&mhi_cntrl->pm_mutex); - - dev_info(dev, "Power on setup success\n"); - - return 0; - -error_exit: - mhi_cntrl->pm_state = MHI_PM_DISABLE; - mutex_unlock(&mhi_cntrl->pm_mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(mhi_async_power_up); - -void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) -{ - enum mhi_pm_state cur_state, transition_state; - struct device *dev = &mhi_cntrl->mhi_dev->dev; - - mutex_lock(&mhi_cntrl->pm_mutex); - write_lock_irq(&mhi_cntrl->pm_lock); - cur_state = mhi_cntrl->pm_state; - if (cur_state == MHI_PM_DISABLE) { - write_unlock_irq(&mhi_cntrl->pm_lock); - mutex_unlock(&mhi_cntrl->pm_mutex); - return; /* Already powered down */ - } - - /* If it's not a graceful shutdown, force MHI to linkdown state */ - transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS : - MHI_PM_LD_ERR_FATAL_DETECT; - - cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state); - if (cur_state != transition_state) { - dev_err(dev, "Failed to move to state: %s from: %s\n", - to_mhi_pm_state_str(transition_state), - to_mhi_pm_state_str(mhi_cntrl->pm_state)); - /* Force link down or error fatal detected state */ - mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; - } - - /* mark device inactive to avoid any further host processing */ - mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; - mhi_cntrl->dev_state = MHI_STATE_RESET; - - wake_up_all(&mhi_cntrl->state_event); - - write_unlock_irq(&mhi_cntrl->pm_lock); - mutex_unlock(&mhi_cntrl->pm_mutex); - - mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE); - - /* Wait for shutdown to complete */ - flush_work(&mhi_cntrl->st_worker); - - free_irq(mhi_cntrl->irq[0], mhi_cntrl); -} -EXPORT_SYMBOL_GPL(mhi_power_down); - -int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) -{ - int ret = mhi_async_power_up(mhi_cntrl); - - if (ret) - return ret; - - wait_event_timeout(mhi_cntrl->state_event, - MHI_IN_MISSION_MODE(mhi_cntrl->ee) || - MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), - msecs_to_jiffies(mhi_cntrl->timeout_ms)); - - ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT; - if (ret) - mhi_power_down(mhi_cntrl, false); - - return ret; -} -EXPORT_SYMBOL(mhi_sync_power_up); - -int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl) -{ - struct device *dev = &mhi_cntrl->mhi_dev->dev; - int ret; - - /* Check if device is already in RDDM */ - if (mhi_cntrl->ee == MHI_EE_RDDM) - return 0; - - dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n"); - mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); - - /* Wait for RDDM event */ - ret = wait_event_timeout(mhi_cntrl->state_event, - mhi_cntrl->ee == MHI_EE_RDDM, - msecs_to_jiffies(mhi_cntrl->timeout_ms)); - ret = ret ? 
0 : -EIO; - - return ret; -} -EXPORT_SYMBOL_GPL(mhi_force_rddm_mode); - -void mhi_device_get(struct mhi_device *mhi_dev) -{ - struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - - mhi_dev->dev_wake++; - read_lock_bh(&mhi_cntrl->pm_lock); - if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) - mhi_trigger_resume(mhi_cntrl); - - mhi_cntrl->wake_get(mhi_cntrl, true); - read_unlock_bh(&mhi_cntrl->pm_lock); -} -EXPORT_SYMBOL_GPL(mhi_device_get); - -int mhi_device_get_sync(struct mhi_device *mhi_dev) -{ - struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - int ret; - - ret = __mhi_device_get_sync(mhi_cntrl); - if (!ret) - mhi_dev->dev_wake++; - - return ret; -} -EXPORT_SYMBOL_GPL(mhi_device_get_sync); - -void mhi_device_put(struct mhi_device *mhi_dev) -{ - struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - - mhi_dev->dev_wake--; - read_lock_bh(&mhi_cntrl->pm_lock); - if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) - mhi_trigger_resume(mhi_cntrl); - - mhi_cntrl->wake_put(mhi_cntrl, false); - read_unlock_bh(&mhi_cntrl->pm_lock); -} -EXPORT_SYMBOL_GPL(mhi_device_put); diff --git a/drivers/bus/mhi/host/Kconfig b/drivers/bus/mhi/host/Kconfig new file mode 100644 index 000000000000..da5cd0c9fc62 --- /dev/null +++ b/drivers/bus/mhi/host/Kconfig @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# MHI bus +# +# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. +# + +config MHI_BUS + tristate "Modem Host Interface (MHI) bus" + help + Bus driver for MHI protocol. Modem Host Interface (MHI) is a + communication protocol used by the host processors to control + and communicate with modem devices over a high speed peripheral + bus or shared memory. + +config MHI_BUS_DEBUG + bool "Debugfs support for the MHI bus" + depends on MHI_BUS && DEBUG_FS + help + Enable debugfs support for use with the MHI transport. Allows + reading and/or modifying some values within the MHI controller + for debug and test purposes. + +config MHI_BUS_PCI_GENERIC + tristate "MHI PCI controller driver" + depends on MHI_BUS + depends on PCI + help + This driver provides MHI PCI controller driver for devices such as + Qualcomm SDX55 based PCIe modems. + diff --git a/drivers/bus/mhi/host/Makefile b/drivers/bus/mhi/host/Makefile new file mode 100644 index 000000000000..859c2f38451c --- /dev/null +++ b/drivers/bus/mhi/host/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_MHI_BUS) += mhi.o +mhi-y := init.o main.o pm.o boot.o +mhi-$(CONFIG_MHI_BUS_DEBUG) += debugfs.o + +obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o +mhi_pci_generic-y += pci_generic.o diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c new file mode 100644 index 000000000000..74295d3cc662 --- /dev/null +++ b/drivers/bus/mhi/host/boot.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +/* Setup RDDM vector table for RDDM transfer and program RXVEC */ +void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info) +{ + struct mhi_buf *mhi_buf = img_info->mhi_buf; + struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; + void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 sequence_id; + unsigned int i; + + for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { + bhi_vec->dma_addr = mhi_buf->dma_addr; + bhi_vec->size = mhi_buf->len; + } + + dev_dbg(dev, "BHIe programming for RDDM\n"); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); + sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK); + + mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, + BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, + sequence_id); + + dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n", + &mhi_buf->dma_addr, mhi_buf->len, sequence_id); +} + +/* Collect RDDM buffer during kernel panic */ +static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) +{ + int ret; + u32 rx_status; + enum mhi_ee_type ee; + const u32 delayus = 2000; + u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus; + const u32 rddm_timeout_us = 200000; + int rddm_retry = rddm_timeout_us / delayus; + void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + /* + * This should only be executing during a kernel panic, we expect all + * other cores to shutdown while we're collecting RDDM buffer. After + * returning from this function, we expect the device to reset. + * + * Normaly, we read/write pm_state only after grabbing the + * pm_lock, since we're in a panic, skipping it. Also there is no + * gurantee that this state change would take effect since + * we're setting it w/o grabbing pm_lock + */ + mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; + /* update should take the effect immediately */ + smp_wmb(); + + /* + * Make sure device is not already in RDDM. In case the device asserts + * and a kernel panic follows, device will already be in RDDM. + * Do not trigger SYS ERR again and proceed with waiting for + * image download completion. 
+ */ + ee = mhi_get_exec_env(mhi_cntrl); + if (ee == MHI_EE_MAX) + goto error_exit_rddm; + + if (ee != MHI_EE_RDDM) { + dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + + dev_dbg(dev, "Waiting for device to enter RDDM\n"); + while (rddm_retry--) { + ee = mhi_get_exec_env(mhi_cntrl); + if (ee == MHI_EE_RDDM) + break; + + udelay(delayus); + } + + if (rddm_retry <= 0) { + /* Hardware reset so force device to enter RDDM */ + dev_dbg(dev, + "Did not enter RDDM, do a host req reset\n"); + mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, + MHI_SOC_RESET_REQ_OFFSET, + MHI_SOC_RESET_REQ); + udelay(delayus); + } + + ee = mhi_get_exec_env(mhi_cntrl); + } + + dev_dbg(dev, + "Waiting for RDDM image download via BHIe, current EE:%s\n", + TO_MHI_EXEC_STR(ee)); + + while (retry--) { + ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, + BHIE_RXVECSTATUS_STATUS_SHFT, + &rx_status); + if (ret) + return -EIO; + + if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) + return 0; + + udelay(delayus); + } + + ee = mhi_get_exec_env(mhi_cntrl); + ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status); + + dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status); + +error_exit_rddm: + dev_err(dev, "RDDM transfer failed. Current EE: %s\n", + TO_MHI_EXEC_STR(ee)); + + return -EIO; +} + +/* Download RDDM image from device */ +int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic) +{ + void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 rx_status; + + if (in_panic) + return __mhi_download_rddm_in_panic(mhi_cntrl); + + dev_dbg(dev, "Waiting for RDDM image download via BHIe\n"); + + /* Wait for the image download to complete */ + wait_event_timeout(mhi_cntrl->state_event, + mhi_read_reg_field(mhi_cntrl, base, + BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, + BHIE_RXVECSTATUS_STATUS_SHFT, + &rx_status) || rx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO; +} +EXPORT_SYMBOL_GPL(mhi_download_rddm_image); + +static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl, + const struct mhi_buf *mhi_buf) +{ + void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + u32 tx_status, sequence_id; + int ret; + + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + return -EIO; + } + + sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK); + dev_dbg(dev, "Starting image download via BHIe. 
Sequence ID: %u\n", + sequence_id); + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); + + mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, + BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT, + sequence_id); + read_unlock_bh(pm_lock); + + /* Wait for the image download to complete */ + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, + BHIE_TXVECSTATUS_OFFS, + BHIE_TXVECSTATUS_STATUS_BMSK, + BHIE_TXVECSTATUS_STATUS_SHFT, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL) + return -EIO; + + return (!ret) ? -ETIMEDOUT : 0; +} + +static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl, + dma_addr_t dma_addr, + size_t size) +{ + u32 tx_status, val, session_id; + int i, ret; + void __iomem *base = mhi_cntrl->bhi; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct { + char *name; + u32 offset; + } error_reg[] = { + { "ERROR_CODE", BHI_ERRCODE }, + { "ERROR_DBG1", BHI_ERRDBG1 }, + { "ERROR_DBG2", BHI_ERRDBG2 }, + { "ERROR_DBG3", BHI_ERRDBG3 }, + { NULL }, + }; + + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + goto invalid_pm_state; + } + + session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK); + dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n", + session_id); + mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, + upper_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, + lower_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size); + mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id); + read_unlock_bh(pm_lock); + + /* Wait for the image download to complete */ + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS, + BHI_STATUS_MASK, BHI_STATUS_SHIFT, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + goto invalid_pm_state; + + if (tx_status == BHI_STATUS_ERROR) { + dev_err(dev, "Image transfer failed\n"); + read_lock_bh(pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + for (i = 0; error_reg[i].name; i++) { + ret = mhi_read_reg(mhi_cntrl, base, + error_reg[i].offset, &val); + if (ret) + break; + dev_err(dev, "Reg: %s value: 0x%x\n", + error_reg[i].name, val); + } + } + read_unlock_bh(pm_lock); + goto invalid_pm_state; + } + + return (!ret) ? 
-ETIMEDOUT : 0; + +invalid_pm_state: + + return -EIO; +} + +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info *image_info) +{ + int i; + struct mhi_buf *mhi_buf = image_info->mhi_buf; + + for (i = 0; i < image_info->entries; i++, mhi_buf++) + dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, + mhi_buf->buf, mhi_buf->dma_addr); + + kfree(image_info->mhi_buf); + kfree(image_info); +} + +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, + size_t alloc_size) +{ + size_t seg_size = mhi_cntrl->seg_len; + int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1; + int i; + struct image_info *img_info; + struct mhi_buf *mhi_buf; + + img_info = kzalloc(sizeof(*img_info), GFP_KERNEL); + if (!img_info) + return -ENOMEM; + + /* Allocate memory for entries */ + img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf), + GFP_KERNEL); + if (!img_info->mhi_buf) + goto error_alloc_mhi_buf; + + /* Allocate and populate vector table */ + mhi_buf = img_info->mhi_buf; + for (i = 0; i < segments; i++, mhi_buf++) { + size_t vec_size = seg_size; + + /* Vector table is the last entry */ + if (i == segments - 1) + vec_size = sizeof(struct bhi_vec_entry) * i; + + mhi_buf->len = vec_size; + mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, + vec_size, &mhi_buf->dma_addr, + GFP_KERNEL); + if (!mhi_buf->buf) + goto error_alloc_segment; + } + + img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf; + img_info->entries = segments; + *image_info = img_info; + + return 0; + +error_alloc_segment: + for (--i, --mhi_buf; i >= 0; i--, mhi_buf--) + dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, + mhi_buf->buf, mhi_buf->dma_addr); + +error_alloc_mhi_buf: + kfree(img_info); + + return -ENOMEM; +} + +static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, + const struct firmware *firmware, + struct image_info *img_info) +{ + size_t remainder = firmware->size; + size_t to_cpy; + const u8 *buf = firmware->data; + struct mhi_buf *mhi_buf = img_info->mhi_buf; + struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; + + while (remainder) { + to_cpy = min(remainder, mhi_buf->len); + memcpy(mhi_buf->buf, buf, to_cpy); + bhi_vec->dma_addr = mhi_buf->dma_addr; + bhi_vec->size = to_cpy; + + buf += to_cpy; + remainder -= to_cpy; + bhi_vec++; + mhi_buf++; + } +} + +void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) +{ + const struct firmware *firmware = NULL; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + const char *fw_name; + void *buf; + dma_addr_t dma_addr; + size_t size; + int i, ret; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(dev, "Device MHI is not in valid state\n"); + return; + } + + /* save hardware info from BHI */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU, + &mhi_cntrl->serial_number); + if (ret) + dev_err(dev, "Could not capture serial number via BHI\n"); + + for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) { + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), + &mhi_cntrl->oem_pk_hash[i]); + if (ret) { + dev_err(dev, "Could not capture OEM PK HASH via BHI\n"); + break; + } + } + + /* wait for ready on pass through or any other execution environment */ + if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee)) + goto fw_load_ready_state; + + fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ? 
+ mhi_cntrl->edl_image : mhi_cntrl->fw_image; + + if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size || + !mhi_cntrl->seg_len))) { + dev_err(dev, + "No firmware image defined or !sbl_size || !seg_len\n"); + goto error_fw_load; + } + + ret = request_firmware(&firmware, fw_name, dev); + if (ret) { + dev_err(dev, "Error loading firmware: %d\n", ret); + goto error_fw_load; + } + + size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size; + + /* SBL size provided is maximum size, not necessarily the image size */ + if (size > firmware->size) + size = firmware->size; + + buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr, + GFP_KERNEL); + if (!buf) { + release_firmware(firmware); + goto error_fw_load; + } + + /* Download image using BHI */ + memcpy(buf, firmware->data, size); + ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size); + dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr); + + /* Error or in EDL mode, we're done */ + if (ret) { + dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret); + release_firmware(firmware); + goto error_fw_load; + } + + /* Wait for ready since EDL image was loaded */ + if (fw_name == mhi_cntrl->edl_image) { + release_firmware(firmware); + goto fw_load_ready_state; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_RESET; + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* + * If we're doing fbc, populate vector tables while + * device transitioning into MHI READY state + */ + if (mhi_cntrl->fbc_download) { + ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, + firmware->size); + if (ret) { + release_firmware(firmware); + goto error_fw_load; + } + + /* Load the firmware into BHIE vec table */ + mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image); + } + + release_firmware(firmware); + +fw_load_ready_state: + /* Transitioning into MHI RESET->READY state */ + ret = mhi_ready_state_transition(mhi_cntrl); + if (ret) { + dev_err(dev, "MHI did not enter READY state\n"); + goto error_ready_state; + } + + dev_info(dev, "Wait for device to enter SBL or Mission mode\n"); + return; + +error_ready_state: + if (mhi_cntrl->fbc_download) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + +error_fw_load: + mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR; + wake_up_all(&mhi_cntrl->state_event); +} + +int mhi_download_amss_image(struct mhi_controller *mhi_cntrl) +{ + struct image_info *image_info = mhi_cntrl->fbc_image; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + if (!image_info) + return -EIO; + + ret = mhi_fw_load_bhie(mhi_cntrl, + /* Vector table is the last entry */ + &image_info->mhi_buf[image_info->entries - 1]); + if (ret) { + dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret); + mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR; + wake_up_all(&mhi_cntrl->state_event); + } + + return ret; +} diff --git a/drivers/bus/mhi/host/debugfs.c b/drivers/bus/mhi/host/debugfs.c new file mode 100644 index 000000000000..d818586c229d --- /dev/null +++ b/drivers/bus/mhi/host/debugfs.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include "internal.h" + +static int mhi_debugfs_states_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + + /* states */ + seq_printf(m, "PM state: %s Device: %s MHI state: %s EE: %s wake: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + mhi_is_active(mhi_cntrl) ? "Active" : "Inactive", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), + mhi_cntrl->wake_set ? "true" : "false"); + + /* counters */ + seq_printf(m, "M0: %u M2: %u M3: %u", mhi_cntrl->M0, mhi_cntrl->M2, + mhi_cntrl->M3); + + seq_printf(m, " device wake: %u pending packets: %u\n", + atomic_read(&mhi_cntrl->dev_wake), + atomic_read(&mhi_cntrl->pending_pkts)); + + return 0; +} + +static int mhi_debugfs_events_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + int i; + + if (!mhi_is_active(mhi_cntrl)) { + seq_puts(m, "Device not ready\n"); + return -ENODEV; + } + + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; + i++, er_ctxt++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) { + seq_printf(m, "Index: %d is an offload event ring\n", + i); + continue; + } + + seq_printf(m, "Index: %d intmod count: %lu time: %lu", + i, (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODC_MASK) >> + EV_CTX_INTMODC_SHIFT, + (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODT_MASK) >> + EV_CTX_INTMODT_SHIFT); + + seq_printf(m, " base: 0x%0llx len: 0x%llx", le64_to_cpu(er_ctxt->rbase), + le64_to_cpu(er_ctxt->rlen)); + + seq_printf(m, " rp: 0x%llx wp: 0x%llx", le64_to_cpu(er_ctxt->rp), + le64_to_cpu(er_ctxt->wp)); + + seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp, + &mhi_event->db_cfg.db_val); + } + + return 0; +} + +static int mhi_debugfs_channels_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_chan *mhi_chan; + struct mhi_chan_ctxt *chan_ctxt; + int i; + + if (!mhi_is_active(mhi_cntrl)) { + seq_puts(m, "Device not ready\n"); + return -ENODEV; + } + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + struct mhi_ring *ring = &mhi_chan->tre_ring; + + if (mhi_chan->offload_ch) { + seq_printf(m, "%s(%u) is an offload channel\n", + mhi_chan->name, mhi_chan->chan); + continue; + } + + if (!mhi_chan->mhi_dev) + continue; + + seq_printf(m, + "%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx", + mhi_chan->name, mhi_chan->chan, (le32_to_cpu(chan_ctxt->chcfg) & + CHAN_CTX_CHSTATE_MASK) >> CHAN_CTX_CHSTATE_SHIFT, + (le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_BRSTMODE_MASK) >> + CHAN_CTX_BRSTMODE_SHIFT, (le32_to_cpu(chan_ctxt->chcfg) & + CHAN_CTX_POLLCFG_MASK) >> CHAN_CTX_POLLCFG_SHIFT); + + seq_printf(m, " type: 0x%x event ring: %u", le32_to_cpu(chan_ctxt->chtype), + le32_to_cpu(chan_ctxt->erindex)); + + seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx", + le64_to_cpu(chan_ctxt->rbase), le64_to_cpu(chan_ctxt->rlen), + le64_to_cpu(chan_ctxt->rp), le64_to_cpu(chan_ctxt->wp)); + + seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 0x%pad\n", + ring->rp, ring->wp, + &mhi_chan->db_cfg.db_val); + } + + return 0; +} + +static int mhi_device_info_show(struct device *dev, void *data) +{ + struct mhi_device *mhi_dev; + + if (dev->bus != &mhi_bus_type) + return 0; 
+ + mhi_dev = to_mhi_device(dev); + + seq_printf((struct seq_file *)data, "%s: type: %s dev_wake: %u", + mhi_dev->name, mhi_dev->dev_type ? "Controller" : "Transfer", + mhi_dev->dev_wake); + + /* for transfer device types only */ + if (mhi_dev->dev_type == MHI_DEVICE_XFER) + seq_printf((struct seq_file *)data, " channels: %u(UL)/%u(DL)", + mhi_dev->ul_chan_id, mhi_dev->dl_chan_id); + + seq_puts((struct seq_file *)data, "\n"); + + return 0; +} + +static int mhi_debugfs_devices_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + + if (!mhi_is_active(mhi_cntrl)) { + seq_puts(m, "Device not ready\n"); + return -ENODEV; + } + + /* Show controller and client(s) info */ + mhi_device_info_show(&mhi_cntrl->mhi_dev->dev, m); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, m, mhi_device_info_show); + + return 0; +} + +static int mhi_debugfs_regdump_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + enum mhi_state state; + enum mhi_ee_type ee; + int i, ret = -EIO; + u32 val; + void __iomem *mhi_base = mhi_cntrl->regs; + void __iomem *bhi_base = mhi_cntrl->bhi; + void __iomem *bhie_base = mhi_cntrl->bhie; + void __iomem *wake_db = mhi_cntrl->wake_db; + struct { + const char *name; + int offset; + void __iomem *base; + } regs[] = { + { "MHI_REGLEN", MHIREGLEN, mhi_base}, + { "MHI_VER", MHIVER, mhi_base}, + { "MHI_CFG", MHICFG, mhi_base}, + { "MHI_CTRL", MHICTRL, mhi_base}, + { "MHI_STATUS", MHISTATUS, mhi_base}, + { "MHI_WAKE_DB", 0, wake_db}, + { "BHI_EXECENV", BHI_EXECENV, bhi_base}, + { "BHI_STATUS", BHI_STATUS, bhi_base}, + { "BHI_ERRCODE", BHI_ERRCODE, bhi_base}, + { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base}, + { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base}, + { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base}, + { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base}, + { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base}, + { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base}, + { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base}, + { NULL }, + }; + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + return ret; + + seq_printf(m, "Host PM state: %s Device state: %s EE: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + state = mhi_get_mhi_state(mhi_cntrl); + ee = mhi_get_exec_env(mhi_cntrl); + seq_printf(m, "Device EE: %s state: %s\n", TO_MHI_EXEC_STR(ee), + TO_MHI_STATE_STR(state)); + + for (i = 0; regs[i].name; i++) { + if (!regs[i].base) + continue; + ret = mhi_read_reg(mhi_cntrl, regs[i].base, regs[i].offset, + &val); + if (ret) + continue; + + seq_printf(m, "%s: 0x%x\n", regs[i].name, val); + } + + return 0; +} + +static int mhi_debugfs_device_wake_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + + if (!mhi_is_active(mhi_cntrl)) { + seq_puts(m, "Device not ready\n"); + return -ENODEV; + } + + seq_printf(m, + "Wake count: %d\n%s\n", mhi_dev->dev_wake, + "Usage: echo get/put > device_wake to vote/unvote for M0"); + + return 0; +} + +static ssize_t mhi_debugfs_device_wake_write(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = file->private_data; + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + char buf[16]; + int ret = -EINVAL; + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + if (!strncmp(buf, "get", 3)) { + ret = 
mhi_device_get_sync(mhi_dev); + } else if (!strncmp(buf, "put", 3)) { + mhi_device_put(mhi_dev); + ret = 0; + } + + return ret ? ret : count; +} + +static int mhi_debugfs_timeout_ms_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + + seq_printf(m, "%u ms\n", mhi_cntrl->timeout_ms); + + return 0; +} + +static ssize_t mhi_debugfs_timeout_ms_write(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = file->private_data; + struct mhi_controller *mhi_cntrl = m->private; + u32 timeout_ms; + + if (kstrtou32_from_user(ubuf, count, 0, &timeout_ms)) + return -EINVAL; + + mhi_cntrl->timeout_ms = timeout_ms; + + return count; +} + +static int mhi_debugfs_states_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_states_show, inode->i_private); +} + +static int mhi_debugfs_events_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_events_show, inode->i_private); +} + +static int mhi_debugfs_channels_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_channels_show, inode->i_private); +} + +static int mhi_debugfs_devices_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_devices_show, inode->i_private); +} + +static int mhi_debugfs_regdump_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_regdump_show, inode->i_private); +} + +static int mhi_debugfs_device_wake_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_device_wake_show, inode->i_private); +} + +static int mhi_debugfs_timeout_ms_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_timeout_ms_show, inode->i_private); +} + +static const struct file_operations debugfs_states_fops = { + .open = mhi_debugfs_states_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_events_fops = { + .open = mhi_debugfs_events_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_channels_fops = { + .open = mhi_debugfs_channels_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_devices_fops = { + .open = mhi_debugfs_devices_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_regdump_fops = { + .open = mhi_debugfs_regdump_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_device_wake_fops = { + .open = mhi_debugfs_device_wake_open, + .write = mhi_debugfs_device_wake_write, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_timeout_ms_fops = { + .open = mhi_debugfs_timeout_ms_open, + .write = mhi_debugfs_timeout_ms_write, + .release = single_release, + .read = seq_read, +}; + +static struct dentry *mhi_debugfs_root; + +void mhi_create_debugfs(struct mhi_controller *mhi_cntrl) +{ + mhi_cntrl->debugfs_dentry = + debugfs_create_dir(dev_name(&mhi_cntrl->mhi_dev->dev), + mhi_debugfs_root); + + debugfs_create_file("states", 0444, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_states_fops); + debugfs_create_file("events", 0444, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_events_fops); + debugfs_create_file("channels", 0444, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_channels_fops); + debugfs_create_file("devices", 0444, mhi_cntrl->debugfs_dentry, + 
mhi_cntrl, &debugfs_devices_fops); + debugfs_create_file("regdump", 0444, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_regdump_fops); + debugfs_create_file("device_wake", 0644, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_device_wake_fops); + debugfs_create_file("timeout_ms", 0644, mhi_cntrl->debugfs_dentry, + mhi_cntrl, &debugfs_timeout_ms_fops); +} + +void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl) +{ + debugfs_remove_recursive(mhi_cntrl->debugfs_dentry); + mhi_cntrl->debugfs_dentry = NULL; +} + +void mhi_debugfs_init(void) +{ + mhi_debugfs_root = debugfs_create_dir(mhi_bus_type.name, NULL); +} + +void mhi_debugfs_exit(void) +{ + debugfs_remove_recursive(mhi_debugfs_root); +} diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c new file mode 100644 index 000000000000..d8787aaa176b --- /dev/null +++ b/drivers/bus/mhi/host/init.c @@ -0,0 +1,1431 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +static DEFINE_IDA(mhi_controller_ida); + +const char * const mhi_ee_str[MHI_EE_MAX] = { + [MHI_EE_PBL] = "PRIMARY BOOTLOADER", + [MHI_EE_SBL] = "SECONDARY BOOTLOADER", + [MHI_EE_AMSS] = "MISSION MODE", + [MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE", + [MHI_EE_WFW] = "WLAN FIRMWARE", + [MHI_EE_PTHRU] = "PASS THROUGH", + [MHI_EE_EDL] = "EMERGENCY DOWNLOAD", + [MHI_EE_FP] = "FLASH PROGRAMMER", + [MHI_EE_DISABLE_TRANSITION] = "DISABLE", + [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED", +}; + +const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = { + [DEV_ST_TRANSITION_PBL] = "PBL", + [DEV_ST_TRANSITION_READY] = "READY", + [DEV_ST_TRANSITION_SBL] = "SBL", + [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE", + [DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER", + [DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR", + [DEV_ST_TRANSITION_DISABLE] = "DISABLE", +}; + +const char * const mhi_state_str[MHI_STATE_MAX] = { + [MHI_STATE_RESET] = "RESET", + [MHI_STATE_READY] = "READY", + [MHI_STATE_M0] = "M0", + [MHI_STATE_M1] = "M1", + [MHI_STATE_M2] = "M2", + [MHI_STATE_M3] = "M3", + [MHI_STATE_M3_FAST] = "M3 FAST", + [MHI_STATE_BHI] = "BHI", + [MHI_STATE_SYS_ERR] = "SYS ERROR", +}; + +const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = { + [MHI_CH_STATE_TYPE_RESET] = "RESET", + [MHI_CH_STATE_TYPE_STOP] = "STOP", + [MHI_CH_STATE_TYPE_START] = "START", +}; + +static const char * const mhi_pm_state_str[] = { + [MHI_PM_STATE_DISABLE] = "DISABLE", + [MHI_PM_STATE_POR] = "POWER ON RESET", + [MHI_PM_STATE_M0] = "M0", + [MHI_PM_STATE_M2] = "M2", + [MHI_PM_STATE_M3_ENTER] = "M?->M3", + [MHI_PM_STATE_M3] = "M3", + [MHI_PM_STATE_M3_EXIT] = "M3->M0", + [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error", + [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect", + [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process", + [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process", + [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect", +}; + +const char *to_mhi_pm_state_str(u32 state) +{ + int index; + + if (state) + index = __fls(state); + + if (!state || index >= ARRAY_SIZE(mhi_pm_state_str)) + return "Invalid State"; + + return mhi_pm_state_str[index]; +} + +static ssize_t serial_number_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = 
mhi_dev->mhi_cntrl; + + return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n", + mhi_cntrl->serial_number); +} +static DEVICE_ATTR_RO(serial_number); + +static ssize_t oem_pk_hash_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int i, cnt = 0; + + for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) + cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, + "OEMPKHASH[%d]: 0x%x\n", i, + mhi_cntrl->oem_pk_hash[i]); + + return cnt; +} +static DEVICE_ATTR_RO(oem_pk_hash); + +static struct attribute *mhi_dev_attrs[] = { + &dev_attr_serial_number.attr, + &dev_attr_oem_pk_hash.attr, + NULL, +}; +ATTRIBUTE_GROUPS(mhi_dev); + +/* MHI protocol requires the transfer ring to be aligned with ring length */ +static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring, + u64 len) +{ + ring->alloc_size = len + (len - 1); + ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, + &ring->dma_handle, GFP_KERNEL); + if (!ring->pre_aligned) + return -ENOMEM; + + ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1); + ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); + + return 0; +} + +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); + } + + free_irq(mhi_cntrl->irq[0], mhi_cntrl); +} + +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) +{ + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND; + int i, ret; + + /* if controller driver has set irq_flags, use it */ + if (mhi_cntrl->irq_flags) + irq_flags = mhi_cntrl->irq_flags; + + /* Setup BHI_INTVEC IRQ */ + ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler, + mhi_intvec_threaded_handler, + irq_flags, + "bhi", mhi_cntrl); + if (ret) + return ret; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + if (mhi_event->irq >= mhi_cntrl->nr_irqs) { + dev_err(dev, "irq %d not available for event ring\n", + mhi_event->irq); + ret = -EINVAL; + goto error_request; + } + + ret = request_irq(mhi_cntrl->irq[mhi_event->irq], + mhi_irq_handler, + irq_flags, + "mhi", mhi_event); + if (ret) { + dev_err(dev, "Error requesting irq:%d for ev:%d\n", + mhi_cntrl->irq[mhi_event->irq], i); + goto error_request; + } + } + + return 0; + +error_request: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); + } + free_irq(mhi_cntrl->irq[0], mhi_cntrl); + + return ret; +} + +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event *mhi_event; + struct mhi_ring *ring; + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) { + ring = &mhi_cmd->ring; + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + ring->base = NULL; + ring->iommu_base = 0; + } + + dma_free_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, 
mhi_ctxt->cmd_ctxt_addr); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ring = &mhi_event->ring; + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + ring->base = NULL; + ring->iommu_base = 0; + } + + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); + + kfree(mhi_ctxt); + mhi_cntrl->mhi_ctxt = NULL; +} + +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + struct mhi_ctxt *mhi_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_event_ctxt *er_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + struct mhi_cmd *mhi_cmd; + u32 tmp; + int ret = -ENOMEM, i; + + atomic_set(&mhi_cntrl->dev_wake, 0); + atomic_set(&mhi_cntrl->pending_pkts, 0); + + mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL); + if (!mhi_ctxt) + return -ENOMEM; + + /* Setup channel ctxt */ + mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, + &mhi_ctxt->chan_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->chan_ctxt) + goto error_alloc_chan_ctxt; + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + /* Skip if it is an offload channel */ + if (mhi_chan->offload_ch) + continue; + + tmp = le32_to_cpu(chan_ctxt->chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); + tmp &= ~CHAN_CTX_BRSTMODE_MASK; + tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT); + tmp &= ~CHAN_CTX_POLLCFG_MASK; + tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT); + chan_ctxt->chcfg = cpu_to_le32(tmp); + + chan_ctxt->chtype = cpu_to_le32(mhi_chan->type); + chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index); + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp; + } + + /* Setup event context */ + mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, + &mhi_ctxt->er_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->er_ctxt) + goto error_alloc_er_ctxt; + + er_ctxt = mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip if it is an offload event */ + if (mhi_event->offload_ev) + continue; + + tmp = le32_to_cpu(er_ctxt->intmod); + tmp &= ~EV_CTX_INTMODC_MASK; + tmp &= ~EV_CTX_INTMODT_MASK; + tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT); + er_ctxt->intmod = cpu_to_le32(tmp); + + er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID); + er_ctxt->msivec = cpu_to_le32(mhi_event->irq); + mhi_event->db_cfg.db_mode = true; + + ring->el_size = sizeof(struct mhi_tre); + ring->len = ring->el_size * ring->elements; + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_er; + + /* + * If the read pointer equals to the write pointer, then the + * ring is empty + */ + ring->rp = ring->wp = ring->base; + er_ctxt->rbase = cpu_to_le64(ring->iommu_base); + er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; + er_ctxt->rlen = 
cpu_to_le64(ring->len); + ring->ctxt_wp = &er_ctxt->wp; + } + + /* Setup cmd context */ + ret = -ENOMEM; + mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->cmd_ctxt) * + NR_OF_CMD_RINGS, + &mhi_ctxt->cmd_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->cmd_ctxt) + goto error_alloc_er; + + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->el_size = sizeof(struct mhi_tre); + ring->elements = CMD_EL_PER_RING; + ring->len = ring->el_size * ring->elements; + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_cmd; + + ring->rp = ring->wp = ring->base; + cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base); + cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; + cmd_ctxt->rlen = cpu_to_le64(ring->len); + ring->ctxt_wp = &cmd_ctxt->wp; + } + + mhi_cntrl->mhi_ctxt = mhi_ctxt; + + return 0; + +error_alloc_cmd: + for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) { + struct mhi_ring *ring = &mhi_cmd->ring; + + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + dma_free_coherent(mhi_cntrl->cntrl_dev, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); + i = mhi_cntrl->total_ev_rings; + mhi_event = mhi_cntrl->mhi_event + i; + +error_alloc_er: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) + continue; + + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + +error_alloc_er_ctxt: + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); + +error_alloc_chan_ctxt: + kfree(mhi_ctxt); + + return ret; +} + +int mhi_init_mmio(struct mhi_controller *mhi_cntrl) +{ + u32 val; + int i, ret; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + void __iomem *base = mhi_cntrl->regs; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct { + u32 offset; + u32 mask; + u32 shift; + u32 val; + } reg_info[] = { + { + CCABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + CCABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + ECABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + ECABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + CRCBAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + { + CRCBAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + { + MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, + mhi_cntrl->total_ev_rings, + }, + { + MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, + mhi_cntrl->hw_ev_rings, + }, + { + MHICTRLBASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLBASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLLIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + MHICTRLLIMIT_LOWER, U32_MAX, 0, + 
lower_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_stop), + }, + { 0, 0, 0 } + }; + + dev_dbg(dev, "Initializing MHI registers\n"); + + /* Read channel db offset */ + ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK, + CHDBOFF_CHDBOFF_SHIFT, &val); + if (ret) { + dev_err(dev, "Unable to read CHDBOFF register\n"); + return -EIO; + } + + /* Setup wake db */ + mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); + mhi_cntrl->wake_set = false; + + /* Setup channel db address for each channel in tre_ring */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++) + mhi_chan->tre_ring.db_addr = base + val; + + /* Read event ring db offset */ + ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK, + ERDBOFF_ERDBOFF_SHIFT, &val); + if (ret) { + dev_err(dev, "Unable to read ERDBOFF register\n"); + return -EIO; + } + + /* Setup event db address for each ev_ring */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + mhi_event->ring.db_addr = base + val; + } + + /* Setup DB register for primary CMD rings */ + mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; + + /* Write to MMIO registers */ + for (i = 0; reg_info[i].offset; i++) + mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, + reg_info[i].mask, reg_info[i].shift, + reg_info[i].val); + + return 0; +} + +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + u32 tmp; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + + if (!chan_ctxt->rbase) /* Already uninitialized */ + return; + + dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); + vfree(buf_ring->base); + + buf_ring->base = tre_ring->base = NULL; + tre_ring->ctxt_wp = NULL; + chan_ctxt->rbase = 0; + chan_ctxt->rlen = 0; + chan_ctxt->rp = 0; + chan_ctxt->wp = 0; + + tmp = le32_to_cpu(chan_ctxt->chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); + chan_ctxt->chcfg = cpu_to_le32(tmp); + + /* Update to all cores */ + smp_wmb(); +} + +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + u32 tmp; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + tre_ring->el_size = sizeof(struct mhi_tre); + tre_ring->len = tre_ring->el_size * tre_ring->elements; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); + if (ret) + return -ENOMEM; + + buf_ring->el_size = sizeof(struct mhi_buf_info); + buf_ring->len = buf_ring->el_size * buf_ring->elements; + buf_ring->base = vzalloc(buf_ring->len); + + if (!buf_ring->base) { + dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); + return -ENOMEM; + } + + tmp = le32_to_cpu(chan_ctxt->chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT); + chan_ctxt->chcfg = 
cpu_to_le32(tmp); + + chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base); + chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; + chan_ctxt->rlen = cpu_to_le64(tre_ring->len); + tre_ring->ctxt_wp = &chan_ctxt->wp; + + tre_ring->rp = tre_ring->wp = tre_ring->base; + buf_ring->rp = buf_ring->wp = buf_ring->base; + mhi_chan->db_cfg.db_mode = 1; + + /* Update to all cores */ + smp_wmb(); + + return 0; +} + +static int parse_ev_cfg(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *config) +{ + struct mhi_event *mhi_event; + const struct mhi_event_config *event_cfg; + struct device *dev = mhi_cntrl->cntrl_dev; + int i, num; + + num = config->num_events; + mhi_cntrl->total_ev_rings = num; + mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + /* Populate event ring */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < num; i++) { + event_cfg = &config->event_cfg[i]; + + mhi_event->er_index = i; + mhi_event->ring.elements = event_cfg->num_elements; + mhi_event->intmod = event_cfg->irq_moderation_ms; + mhi_event->irq = event_cfg->irq; + + if (event_cfg->channel != U32_MAX) { + /* This event ring has a dedicated channel */ + mhi_event->chan = event_cfg->channel; + if (mhi_event->chan >= mhi_cntrl->max_chan) { + dev_err(dev, + "Event Ring channel not available\n"); + goto error_ev_cfg; + } + + mhi_event->mhi_chan = + &mhi_cntrl->mhi_chan[mhi_event->chan]; + } + + /* Priority is fixed to 1 for now */ + mhi_event->priority = 1; + + mhi_event->db_cfg.brstmode = event_cfg->mode; + if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) + goto error_ev_cfg; + + if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE) + mhi_event->db_cfg.process_db = mhi_db_brstmode; + else + mhi_event->db_cfg.process_db = mhi_db_brstmode_disable; + + mhi_event->data_type = event_cfg->data_type; + + switch (mhi_event->data_type) { + case MHI_ER_DATA: + mhi_event->process_event = mhi_process_data_event_ring; + break; + case MHI_ER_CTRL: + mhi_event->process_event = mhi_process_ctrl_ev_ring; + break; + default: + dev_err(dev, "Event Ring type not supported\n"); + goto error_ev_cfg; + } + + mhi_event->hw_ring = event_cfg->hardware_event; + if (mhi_event->hw_ring) + mhi_cntrl->hw_ev_rings++; + else + mhi_cntrl->sw_ev_rings++; + + mhi_event->cl_manage = event_cfg->client_managed; + mhi_event->offload_ev = event_cfg->offload_channel; + mhi_event++; + } + + return 0; + +error_ev_cfg: + + kfree(mhi_cntrl->mhi_event); + return -EINVAL; +} + +static int parse_ch_cfg(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *config) +{ + const struct mhi_channel_config *ch_cfg; + struct device *dev = mhi_cntrl->cntrl_dev; + int i; + u32 chan; + + mhi_cntrl->max_chan = config->max_channels; + + /* + * The allocation of MHI channels can exceed 32KB in some scenarios, + * so to avoid any memory possible allocation failures, vzalloc is + * used here + */ + mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan * + sizeof(*mhi_cntrl->mhi_chan)); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); + + /* Populate channel configurations */ + for (i = 0; i < config->num_channels; i++) { + struct mhi_chan *mhi_chan; + + ch_cfg = &config->ch_cfg[i]; + + chan = ch_cfg->num; + if (chan >= mhi_cntrl->max_chan) { + dev_err(dev, "Channel %d not available\n", chan); + goto error_chan_cfg; + } + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + mhi_chan->name = ch_cfg->name; + mhi_chan->chan = chan; + + 
mhi_chan->tre_ring.elements = ch_cfg->num_elements; + if (!mhi_chan->tre_ring.elements) + goto error_chan_cfg; + + /* + * For some channels, local ring length should be bigger than + * the transfer ring length due to internal logical channels + * in device. So host can queue much more buffers than transfer + * ring length. Example, RSC channels should have a larger local + * channel length than transfer ring length. + */ + mhi_chan->buf_ring.elements = ch_cfg->local_elements; + if (!mhi_chan->buf_ring.elements) + mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements; + mhi_chan->er_index = ch_cfg->event_ring; + mhi_chan->dir = ch_cfg->dir; + + /* + * For most channels, chtype is identical to channel directions. + * So, if it is not defined then assign channel direction to + * chtype + */ + mhi_chan->type = ch_cfg->type; + if (!mhi_chan->type) + mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir; + + mhi_chan->ee_mask = ch_cfg->ee_mask; + mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg; + mhi_chan->lpm_notify = ch_cfg->lpm_notify; + mhi_chan->offload_ch = ch_cfg->offload_channel; + mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch; + mhi_chan->pre_alloc = ch_cfg->auto_queue; + mhi_chan->wake_capable = ch_cfg->wake_capable; + + /* + * If MHI host allocates buffers, then the channel direction + * should be DMA_FROM_DEVICE + */ + if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) { + dev_err(dev, "Invalid channel configuration\n"); + goto error_chan_cfg; + } + + /* + * Bi-directional and direction less channel must be an + * offload channel + */ + if ((mhi_chan->dir == DMA_BIDIRECTIONAL || + mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) { + dev_err(dev, "Invalid channel configuration\n"); + goto error_chan_cfg; + } + + if (!mhi_chan->offload_ch) { + mhi_chan->db_cfg.brstmode = ch_cfg->doorbell; + if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) { + dev_err(dev, "Invalid Door bell mode\n"); + goto error_chan_cfg; + } + } + + if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE) + mhi_chan->db_cfg.process_db = mhi_db_brstmode; + else + mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable; + + mhi_chan->configured = true; + + if (mhi_chan->lpm_notify) + list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); + } + + return 0; + +error_chan_cfg: + vfree(mhi_cntrl->mhi_chan); + + return -EINVAL; +} + +static int parse_config(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *config) +{ + int ret; + + /* Parse MHI channel configuration */ + ret = parse_ch_cfg(mhi_cntrl, config); + if (ret) + return ret; + + /* Parse MHI event configuration */ + ret = parse_ev_cfg(mhi_cntrl, config); + if (ret) + goto error_ev_cfg; + + mhi_cntrl->timeout_ms = config->timeout_ms; + if (!mhi_cntrl->timeout_ms) + mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + + mhi_cntrl->bounce_buf = config->use_bounce_buf; + mhi_cntrl->buffer_len = config->buf_len; + if (!mhi_cntrl->buffer_len) + mhi_cntrl->buffer_len = MHI_MAX_MTU; + + /* By default, host is allowed to ring DB in both M0 and M2 states */ + mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2; + if (config->m2_no_db) + mhi_cntrl->db_access &= ~MHI_PM_M2; + + return 0; + +error_ev_cfg: + vfree(mhi_cntrl->mhi_chan); + + return ret; +} + +int mhi_register_controller(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *config) +{ + struct mhi_event *mhi_event; + struct mhi_chan *mhi_chan; + struct mhi_cmd *mhi_cmd; + struct mhi_device *mhi_dev; + u32 soc_info; + int ret, i; + + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || 
!mhi_cntrl->regs || + !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put || + !mhi_cntrl->status_cb || !mhi_cntrl->read_reg || + !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || + !mhi_cntrl->irq || !mhi_cntrl->reg_len) + return -EINVAL; + + ret = parse_config(mhi_cntrl, config); + if (ret) + return -EINVAL; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, + sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if (!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto err_free_event; + } + + INIT_LIST_HEAD(&mhi_cntrl->transition_list); + mutex_init(&mhi_cntrl->pm_mutex); + rwlock_init(&mhi_cntrl->pm_lock); + spin_lock_init(&mhi_cntrl->transition_lock); + spin_lock_init(&mhi_cntrl->wlock); + INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker); + init_waitqueue_head(&mhi_cntrl->state_event); + + mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI); + if (!mhi_cntrl->hiprio_wq) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n"); + ret = -ENOMEM; + goto err_free_cmd; + } + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) + spin_lock_init(&mhi_cmd->lock); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + /* Skip for offload events */ + if (mhi_event->offload_ev) + continue; + + mhi_event->mhi_cntrl = mhi_cntrl; + spin_lock_init(&mhi_event->lock); + if (mhi_event->data_type == MHI_ER_CTRL) + tasklet_init(&mhi_event->task, mhi_ctrl_ev_task, + (ulong)mhi_event); + else + tasklet_init(&mhi_event->task, mhi_ev_task, + (ulong)mhi_event); + } + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + mutex_init(&mhi_chan->mutex); + init_completion(&mhi_chan->completion); + rwlock_init(&mhi_chan->lock); + + /* used in setting bei field of TRE */ + mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + mhi_chan->intmod = mhi_event->intmod; + } + + if (mhi_cntrl->bounce_buf) { + mhi_cntrl->map_single = mhi_map_single_use_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_use_bb; + } else { + mhi_cntrl->map_single = mhi_map_single_no_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; + } + + /* Read the MHI device info */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, + SOC_HW_VERSION_OFFS, &soc_info); + if (ret) + goto err_destroy_wq; + + mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >> + SOC_HW_VERSION_FAM_NUM_SHFT; + mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >> + SOC_HW_VERSION_DEV_NUM_SHFT; + mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >> + SOC_HW_VERSION_MAJOR_VER_SHFT; + mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >> + SOC_HW_VERSION_MINOR_VER_SHFT; + + mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL); + if (mhi_cntrl->index < 0) { + ret = mhi_cntrl->index; + goto err_destroy_wq; + } + + /* Register controller with MHI bus */ + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (IS_ERR(mhi_dev)) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n"); + ret = PTR_ERR(mhi_dev); + goto err_ida_free; + } + + mhi_dev->dev_type = MHI_DEVICE_CONTROLLER; + mhi_dev->mhi_cntrl = mhi_cntrl; + dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index); + mhi_dev->name = dev_name(&mhi_dev->dev); + + /* Init wakeup source */ + device_init_wakeup(&mhi_dev->dev, true); + + ret = device_add(&mhi_dev->dev); + if (ret) + goto err_release_dev; + + mhi_cntrl->mhi_dev = mhi_dev; + + mhi_create_debugfs(mhi_cntrl); + + return 0; + +err_release_dev: + 
put_device(&mhi_dev->dev); +err_ida_free: + ida_free(&mhi_controller_ida, mhi_cntrl->index); +err_destroy_wq: + destroy_workqueue(mhi_cntrl->hiprio_wq); +err_free_cmd: + kfree(mhi_cntrl->mhi_cmd); +err_free_event: + kfree(mhi_cntrl->mhi_event); + vfree(mhi_cntrl->mhi_chan); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_register_controller); + +void mhi_unregister_controller(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan; + unsigned int i; + + mhi_destroy_debugfs(mhi_cntrl); + + destroy_workqueue(mhi_cntrl->hiprio_wq); + kfree(mhi_cntrl->mhi_cmd); + kfree(mhi_cntrl->mhi_event); + + /* Drop the references to MHI devices created for channels */ + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + if (!mhi_chan->mhi_dev) + continue; + + put_device(&mhi_chan->mhi_dev->dev); + } + vfree(mhi_cntrl->mhi_chan); + + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); + + ida_free(&mhi_controller_ida, mhi_cntrl->index); +} +EXPORT_SYMBOL_GPL(mhi_unregister_controller); + +struct mhi_controller *mhi_alloc_controller(void) +{ + struct mhi_controller *mhi_cntrl; + + mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL); + + return mhi_cntrl; +} +EXPORT_SYMBOL_GPL(mhi_alloc_controller); + +void mhi_free_controller(struct mhi_controller *mhi_cntrl) +{ + kfree(mhi_cntrl); +} +EXPORT_SYMBOL_GPL(mhi_free_controller); + +int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 bhi_off, bhie_off; + int ret; + + mutex_lock(&mhi_cntrl->pm_mutex); + + ret = mhi_init_dev_ctxt(mhi_cntrl); + if (ret) + goto error_dev_ctxt; + + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off); + if (ret) { + dev_err(dev, "Error getting BHI offset\n"); + goto error_reg_offset; + } + + if (bhi_off >= mhi_cntrl->reg_len) { + dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n", + bhi_off, mhi_cntrl->reg_len); + ret = -EINVAL; + goto error_reg_offset; + } + mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off; + + if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) { + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, + &bhie_off); + if (ret) { + dev_err(dev, "Error getting BHIE offset\n"); + goto error_reg_offset; + } + + if (bhie_off >= mhi_cntrl->reg_len) { + dev_err(dev, + "BHIe offset: 0x%x is out of range: 0x%zx\n", + bhie_off, mhi_cntrl->reg_len); + ret = -EINVAL; + goto error_reg_offset; + } + mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off; + } + + if (mhi_cntrl->rddm_size) { + /* + * This controller supports RDDM, so we need to manually clear + * BHIE RX registers since POR values are undefined. 
+ */ + memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS, + 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + + 4); + /* + * Allocate RDDM table for debugging purpose if specified + */ + mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, + mhi_cntrl->rddm_size); + if (mhi_cntrl->rddm_image) + mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image); + } + + mutex_unlock(&mhi_cntrl->pm_mutex); + + return 0; + +error_reg_offset: + mhi_deinit_dev_ctxt(mhi_cntrl); + +error_dev_ctxt: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up); + +void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->fbc_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + + if (mhi_cntrl->rddm_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); + mhi_cntrl->rddm_image = NULL; + } + + mhi_cntrl->bhi = NULL; + mhi_cntrl->bhie = NULL; + + mhi_deinit_dev_ctxt(mhi_cntrl); +} +EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down); + +static void mhi_release_device(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + + /* + * We need to set the mhi_chan->mhi_dev to NULL here since the MHI + * devices for the channels will only get created if the mhi_dev + * associated with it is NULL. This scenario will happen during the + * controller suspend and resume. + */ + if (mhi_dev->ul_chan) + mhi_dev->ul_chan->mhi_dev = NULL; + + if (mhi_dev->dl_chan) + mhi_dev->dl_chan->mhi_dev = NULL; + + kfree(mhi_dev); +} + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev; + struct device *dev; + + mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + if (!mhi_dev) + return ERR_PTR(-ENOMEM); + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_bus_type; + dev->release = mhi_release_device; + + if (mhi_cntrl->mhi_dev) { + /* for MHI client devices, parent is the MHI controller device */ + dev->parent = &mhi_cntrl->mhi_dev->dev; + } else { + /* for MHI controller device, parent is the bus device (e.g. 
pci device) */ + dev->parent = mhi_cntrl->cntrl_dev; + } + + mhi_dev->mhi_cntrl = mhi_cntrl; + mhi_dev->dev_wake = 0; + + return mhi_dev; +} + +static int mhi_driver_probe(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device_driver *drv = dev->driver; + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + struct mhi_event *mhi_event; + struct mhi_chan *ul_chan = mhi_dev->ul_chan; + struct mhi_chan *dl_chan = mhi_dev->dl_chan; + int ret; + + /* Bring device out of LPM */ + ret = mhi_device_get_sync(mhi_dev); + if (ret) + return ret; + + ret = -EINVAL; + + if (ul_chan) { + /* + * If channel supports LPM notifications then status_cb should + * be provided + */ + if (ul_chan->lpm_notify && !mhi_drv->status_cb) + goto exit_probe; + + /* For non-offload channels then xfer_cb should be provided */ + if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb) + goto exit_probe; + + ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + } + + ret = -EINVAL; + if (dl_chan) { + /* + * If channel supports LPM notifications then status_cb should + * be provided + */ + if (dl_chan->lpm_notify && !mhi_drv->status_cb) + goto exit_probe; + + /* For non-offload channels then xfer_cb should be provided */ + if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb) + goto exit_probe; + + mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index]; + + /* + * If the channel event ring is managed by client, then + * status_cb must be provided so that the framework can + * notify pending data + */ + if (mhi_event->cl_manage && !mhi_drv->status_cb) + goto exit_probe; + + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + } + + /* Call the user provided probe function */ + ret = mhi_drv->probe(mhi_dev, mhi_dev->id); + if (ret) + goto exit_probe; + + mhi_device_put(mhi_dev); + + return ret; + +exit_probe: + mhi_unprepare_from_transfer(mhi_dev); + + mhi_device_put(mhi_dev); + + return ret; +} + +static int mhi_driver_remove(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + enum mhi_ch_state ch_state[] = { + MHI_CH_STATE_DISABLED, + MHI_CH_STATE_DISABLED + }; + int dir; + + /* Skip if it is a controller device */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + /* Reset both channels */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + /* Wake all threads waiting for completion */ + write_lock_irq(&mhi_chan->lock); + mhi_chan->ccs = MHI_EV_CC_INVALID; + complete_all(&mhi_chan->completion); + write_unlock_irq(&mhi_chan->lock); + + /* Set the channel state to disabled */ + mutex_lock(&mhi_chan->mutex); + write_lock_irq(&mhi_chan->lock); + ch_state[dir] = mhi_chan->ch_state; + mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED; + write_unlock_irq(&mhi_chan->lock); + + /* Reset the non-offload channel */ + if (!mhi_chan->offload_ch) + mhi_reset_chan(mhi_cntrl, mhi_chan); + + mutex_unlock(&mhi_chan->mutex); + } + + mhi_drv->remove(mhi_dev); + + /* De-init channel if it was enabled */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? 
mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + mutex_lock(&mhi_chan->mutex); + + if ((ch_state[dir] == MHI_CH_STATE_ENABLED || + ch_state[dir] == MHI_CH_STATE_STOP) && + !mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + + mutex_unlock(&mhi_chan->mutex); + } + + while (mhi_dev->dev_wake) + mhi_device_put(mhi_dev); + + return 0; +} + +int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + driver->bus = &mhi_bus_type; + driver->owner = owner; + driver->probe = mhi_driver_probe; + driver->remove = mhi_driver_remove; + + return driver_register(driver); +} +EXPORT_SYMBOL_GPL(__mhi_driver_register); + +void mhi_driver_unregister(struct mhi_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL_GPL(mhi_driver_unregister); + +static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + + return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT, + mhi_dev->name); +} + +static int mhi_match(struct device *dev, struct device_driver *drv) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + const struct mhi_device_id *id; + + /* + * If the device is a controller type then there is no client driver + * associated with it + */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + for (id = mhi_drv->id_table; id->chan[0]; id++) + if (!strcmp(mhi_dev->name, id->chan)) { + mhi_dev->id = id; + return 1; + } + + return 0; +}; + +struct bus_type mhi_bus_type = { + .name = "mhi", + .dev_name = "mhi", + .match = mhi_match, + .uevent = mhi_uevent, + .dev_groups = mhi_dev_groups, +}; + +static int __init mhi_init(void) +{ + mhi_debugfs_init(); + return bus_register(&mhi_bus_type); +} + +static void __exit mhi_exit(void) +{ + mhi_debugfs_exit(); + bus_unregister(&mhi_bus_type); +} + +postcore_initcall(mhi_init); +module_exit(mhi_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MHI Host Interface"); diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h new file mode 100644 index 000000000000..37c39bf1c7a9 --- /dev/null +++ b/drivers/bus/mhi/host/internal.h @@ -0,0 +1,723 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
+ * + */ + +#ifndef _MHI_INT_H +#define _MHI_INT_H + +#include + +extern struct bus_type mhi_bus_type; + +#define MHIREGLEN (0x0) +#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF) +#define MHIREGLEN_MHIREGLEN_SHIFT (0) + +#define MHIVER (0x8) +#define MHIVER_MHIVER_MASK (0xFFFFFFFF) +#define MHIVER_MHIVER_SHIFT (0) + +#define MHICFG (0x10) +#define MHICFG_NHWER_MASK (0xFF000000) +#define MHICFG_NHWER_SHIFT (24) +#define MHICFG_NER_MASK (0xFF0000) +#define MHICFG_NER_SHIFT (16) +#define MHICFG_NHWCH_MASK (0xFF00) +#define MHICFG_NHWCH_SHIFT (8) +#define MHICFG_NCH_MASK (0xFF) +#define MHICFG_NCH_SHIFT (0) + +#define CHDBOFF (0x18) +#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF) +#define CHDBOFF_CHDBOFF_SHIFT (0) + +#define ERDBOFF (0x20) +#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF) +#define ERDBOFF_ERDBOFF_SHIFT (0) + +#define BHIOFF (0x28) +#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF) +#define BHIOFF_BHIOFF_SHIFT (0) + +#define BHIEOFF (0x2C) +#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF) +#define BHIEOFF_BHIEOFF_SHIFT (0) + +#define DEBUGOFF (0x30) +#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF) +#define DEBUGOFF_DEBUGOFF_SHIFT (0) + +#define MHICTRL (0x38) +#define MHICTRL_MHISTATE_MASK (0x0000FF00) +#define MHICTRL_MHISTATE_SHIFT (8) +#define MHICTRL_RESET_MASK (0x2) +#define MHICTRL_RESET_SHIFT (1) + +#define MHISTATUS (0x48) +#define MHISTATUS_MHISTATE_MASK (0x0000FF00) +#define MHISTATUS_MHISTATE_SHIFT (8) +#define MHISTATUS_SYSERR_MASK (0x4) +#define MHISTATUS_SYSERR_SHIFT (2) +#define MHISTATUS_READY_MASK (0x1) +#define MHISTATUS_READY_SHIFT (0) + +#define CCABAP_LOWER (0x58) +#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF) +#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0) + +#define CCABAP_HIGHER (0x5C) +#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF) +#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0) + +#define ECABAP_LOWER (0x60) +#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF) +#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0) + +#define ECABAP_HIGHER (0x64) +#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF) +#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0) + +#define CRCBAP_LOWER (0x68) +#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF) +#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0) + +#define CRCBAP_HIGHER (0x6C) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0) + +#define CRDB_LOWER (0x70) +#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF) +#define CRDB_LOWER_CRDB_LOWER_SHIFT (0) + +#define CRDB_HIGHER (0x74) +#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF) +#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0) + +#define MHICTRLBASE_LOWER (0x80) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0) + +#define MHICTRLBASE_HIGHER (0x84) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0) + +#define MHICTRLLIMIT_LOWER (0x88) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0) + +#define MHICTRLLIMIT_HIGHER (0x8C) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0) + +#define MHIDATABASE_LOWER (0x98) +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF) +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0) + +#define MHIDATABASE_HIGHER (0x9C) +#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF) +#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0) + +#define 
MHIDATALIMIT_LOWER (0xA0) +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF) +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0) + +#define MHIDATALIMIT_HIGHER (0xA4) +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF) +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0) + +/* Host request register */ +#define MHI_SOC_RESET_REQ_OFFSET (0xB0) +#define MHI_SOC_RESET_REQ BIT(0) + +/* MHI BHI offfsets */ +#define BHI_BHIVERSION_MINOR (0x00) +#define BHI_BHIVERSION_MAJOR (0x04) +#define BHI_IMGADDR_LOW (0x08) +#define BHI_IMGADDR_HIGH (0x0C) +#define BHI_IMGSIZE (0x10) +#define BHI_RSVD1 (0x14) +#define BHI_IMGTXDB (0x18) +#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHI_TXDB_SEQNUM_SHFT (0) +#define BHI_RSVD2 (0x1C) +#define BHI_INTVEC (0x20) +#define BHI_RSVD3 (0x24) +#define BHI_EXECENV (0x28) +#define BHI_STATUS (0x2C) +#define BHI_ERRCODE (0x30) +#define BHI_ERRDBG1 (0x34) +#define BHI_ERRDBG2 (0x38) +#define BHI_ERRDBG3 (0x3C) +#define BHI_SERIALNU (0x40) +#define BHI_SBLANTIROLLVER (0x44) +#define BHI_NUMSEG (0x48) +#define BHI_MSMHWID(n) (0x4C + (0x4 * (n))) +#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n))) +#define BHI_RSVD5 (0xC4) +#define BHI_STATUS_MASK (0xC0000000) +#define BHI_STATUS_SHIFT (30) +#define BHI_STATUS_ERROR (3) +#define BHI_STATUS_SUCCESS (2) +#define BHI_STATUS_RESET (0) + +/* MHI BHIE offsets */ +#define BHIE_MSMSOCID_OFFS (0x0000) +#define BHIE_TXVECADDR_LOW_OFFS (0x002C) +#define BHIE_TXVECADDR_HIGH_OFFS (0x0030) +#define BHIE_TXVECSIZE_OFFS (0x0034) +#define BHIE_TXVECDB_OFFS (0x003C) +#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_TXVECDB_SEQNUM_SHFT (0) +#define BHIE_TXVECSTATUS_OFFS (0x0044) +#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0) +#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000) +#define BHIE_TXVECSTATUS_STATUS_SHFT (30) +#define BHIE_TXVECSTATUS_STATUS_RESET (0x00) +#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02) +#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03) +#define BHIE_RXVECADDR_LOW_OFFS (0x0060) +#define BHIE_RXVECADDR_HIGH_OFFS (0x0064) +#define BHIE_RXVECSIZE_OFFS (0x0068) +#define BHIE_RXVECDB_OFFS (0x0070) +#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_RXVECDB_SEQNUM_SHFT (0) +#define BHIE_RXVECSTATUS_OFFS (0x0078) +#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0) +#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000) +#define BHIE_RXVECSTATUS_STATUS_SHFT (30) +#define BHIE_RXVECSTATUS_STATUS_RESET (0x00) +#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02) +#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03) + +#define SOC_HW_VERSION_OFFS (0x224) +#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000) +#define SOC_HW_VERSION_FAM_NUM_SHFT (28) +#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000) +#define SOC_HW_VERSION_DEV_NUM_SHFT (16) +#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00) +#define SOC_HW_VERSION_MAJOR_VER_SHFT (8) +#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF) +#define SOC_HW_VERSION_MINOR_VER_SHFT (0) + +#define EV_CTX_RESERVED_MASK GENMASK(7, 0) +#define EV_CTX_INTMODC_MASK GENMASK(15, 8) +#define EV_CTX_INTMODC_SHIFT 8 +#define EV_CTX_INTMODT_MASK GENMASK(31, 16) +#define EV_CTX_INTMODT_SHIFT 16 +struct mhi_event_ctxt { + __le32 intmod; + __le32 ertype; + __le32 msivec; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0) +#define 
CHAN_CTX_CHSTATE_SHIFT 0 +#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8) +#define CHAN_CTX_BRSTMODE_SHIFT 8 +#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10) +#define CHAN_CTX_POLLCFG_SHIFT 10 +#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16) +struct mhi_chan_ctxt { + __le32 chcfg; + __le32 chtype; + __le32 erindex; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +struct mhi_cmd_ctxt { + __le32 reserved0; + __le32 reserved1; + __le32 reserved2; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +struct mhi_ctxt { + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + dma_addr_t er_ctxt_addr; + dma_addr_t chan_ctxt_addr; + dma_addr_t cmd_ctxt_addr; +}; + +struct mhi_tre { + __le64 ptr; + __le32 dword[2]; +}; + +struct bhi_vec_entry { + u64 dma_addr; + u64 size; +}; + +enum mhi_cmd_type { + MHI_CMD_NOP = 1, + MHI_CMD_RESET_CHAN = 16, + MHI_CMD_STOP_CHAN = 17, + MHI_CMD_START_CHAN = 18, +}; + +/* No operation command */ +#define MHI_TRE_CMD_NOOP_PTR (0) +#define MHI_TRE_CMD_NOOP_DWORD0 (0) +#define MHI_TRE_CMD_NOOP_DWORD1 (cpu_to_le32(MHI_CMD_NOP << 16)) + +/* Channel reset command */ +#define MHI_TRE_CMD_RESET_PTR (0) +#define MHI_TRE_CMD_RESET_DWORD0 (0) +#define MHI_TRE_CMD_RESET_DWORD1(chid) (cpu_to_le32((chid << 24) | \ + (MHI_CMD_RESET_CHAN << 16))) + +/* Channel stop command */ +#define MHI_TRE_CMD_STOP_PTR (0) +#define MHI_TRE_CMD_STOP_DWORD0 (0) +#define MHI_TRE_CMD_STOP_DWORD1(chid) (cpu_to_le32((chid << 24) | \ + (MHI_CMD_STOP_CHAN << 16))) + +/* Channel start command */ +#define MHI_TRE_CMD_START_PTR (0) +#define MHI_TRE_CMD_START_DWORD0 (0) +#define MHI_TRE_CMD_START_DWORD1(chid) (cpu_to_le32((chid << 24) | \ + (MHI_CMD_START_CHAN << 16))) + +#define MHI_TRE_GET_DWORD(tre, word) (le32_to_cpu((tre)->dword[(word)])) +#define MHI_TRE_GET_CMD_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_CMD_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) + +/* Event descriptor macros */ +#define MHI_TRE_EV_PTR(ptr) (cpu_to_le64(ptr)) +#define MHI_TRE_EV_DWORD0(code, len) (cpu_to_le32((code << 24) | len)) +#define MHI_TRE_EV_DWORD1(chid, type) (cpu_to_le32((chid << 24) | (type << 16))) +#define MHI_TRE_GET_EV_PTR(tre) (le64_to_cpu((tre)->ptr)) +#define MHI_TRE_GET_EV_CODE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LEN(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFFFF) +#define MHI_TRE_GET_EV_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) +#define MHI_TRE_GET_EV_STATE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_EXECENV(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_SEQ(tre) MHI_TRE_GET_DWORD(tre, 0) +#define MHI_TRE_GET_EV_TIME(tre) (MHI_TRE_GET_EV_PTR(tre)) +#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits(MHI_TRE_GET_EV_PTR(tre)) +#define MHI_TRE_GET_EV_VEID(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 16) & 0xFF) +#define MHI_TRE_GET_EV_LINKSPEED(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LINKWIDTH(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFF) + +/* Transfer descriptor macros */ +#define MHI_TRE_DATA_PTR(ptr) (cpu_to_le64(ptr)) +#define MHI_TRE_DATA_DWORD0(len) (cpu_to_le32(len & MHI_MAX_MTU)) +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) 
(cpu_to_le32((2 << 16) | (bei << 10) \ + | (ieot << 9) | (ieob << 8) | chain)) + +/* RSC transfer descriptor macros */ +#define MHI_RSCTRE_DATA_PTR(ptr, len) (cpu_to_le64(((u64)len << 48) | ptr)) +#define MHI_RSCTRE_DATA_DWORD0(cookie) (cpu_to_le32(cookie)) +#define MHI_RSCTRE_DATA_DWORD1 (cpu_to_le32(MHI_PKT_TYPE_COALESCING << 16)) + +enum mhi_pkt_type { + MHI_PKT_TYPE_INVALID = 0x0, + MHI_PKT_TYPE_NOOP_CMD = 0x1, + MHI_PKT_TYPE_TRANSFER = 0x2, + MHI_PKT_TYPE_COALESCING = 0x8, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_TX_EVENT = 0x22, + MHI_PKT_TYPE_RSC_TX_EVENT = 0x28, + MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_TSYNC_EVENT = 0x48, + MHI_PKT_TYPE_BW_REQ_EVENT = 0x50, + MHI_PKT_TYPE_STALE_EVENT, /* internal event */ +}; + +/* MHI transfer completion events */ +enum mhi_ev_ccs { + MHI_EV_CC_INVALID = 0x0, + MHI_EV_CC_SUCCESS = 0x1, + MHI_EV_CC_EOT = 0x2, /* End of transfer event */ + MHI_EV_CC_OVERFLOW = 0x3, + MHI_EV_CC_EOB = 0x4, /* End of block event */ + MHI_EV_CC_OOB = 0x5, /* Out of block event */ + MHI_EV_CC_DB_MODE = 0x6, + MHI_EV_CC_UNDEFINED_ERR = 0x10, + MHI_EV_CC_BAD_TRE = 0x11, +}; + +enum mhi_ch_state { + MHI_CH_STATE_DISABLED = 0x0, + MHI_CH_STATE_ENABLED = 0x1, + MHI_CH_STATE_RUNNING = 0x2, + MHI_CH_STATE_SUSPENDED = 0x3, + MHI_CH_STATE_STOP = 0x4, + MHI_CH_STATE_ERROR = 0x5, +}; + +enum mhi_ch_state_type { + MHI_CH_STATE_TYPE_RESET, + MHI_CH_STATE_TYPE_STOP, + MHI_CH_STATE_TYPE_START, + MHI_CH_STATE_TYPE_MAX, +}; + +extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX]; +#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \ + "INVALID_STATE" : \ + mhi_ch_state_type_str[(state)]) + +#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \ + mode != MHI_DB_BRST_ENABLE) + +extern const char * const mhi_ee_str[MHI_EE_MAX]; +#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \ + "INVALID_EE" : mhi_ee_str[ee]) + +#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \ + ee == MHI_EE_EDL) +#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS) +#define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL) +#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \ + ee == MHI_EE_FP) + +enum dev_st_transition { + DEV_ST_TRANSITION_PBL, + DEV_ST_TRANSITION_READY, + DEV_ST_TRANSITION_SBL, + DEV_ST_TRANSITION_MISSION_MODE, + DEV_ST_TRANSITION_FP, + DEV_ST_TRANSITION_SYS_ERR, + DEV_ST_TRANSITION_DISABLE, + DEV_ST_TRANSITION_MAX, +}; + +extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX]; +#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \ + "INVALID_STATE" : dev_state_tran_str[state]) + +extern const char * const mhi_state_str[MHI_STATE_MAX]; +#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \ + !mhi_state_str[state]) ? 
\ + "INVALID_STATE" : mhi_state_str[state]) + +/* internal power states */ +enum mhi_pm_state { + MHI_PM_STATE_DISABLE, + MHI_PM_STATE_POR, + MHI_PM_STATE_M0, + MHI_PM_STATE_M2, + MHI_PM_STATE_M3_ENTER, + MHI_PM_STATE_M3, + MHI_PM_STATE_M3_EXIT, + MHI_PM_STATE_FW_DL_ERR, + MHI_PM_STATE_SYS_ERR_DETECT, + MHI_PM_STATE_SYS_ERR_PROCESS, + MHI_PM_STATE_SHUTDOWN_PROCESS, + MHI_PM_STATE_LD_ERR_FATAL_DETECT, + MHI_PM_STATE_MAX +}; + +#define MHI_PM_DISABLE BIT(0) +#define MHI_PM_POR BIT(1) +#define MHI_PM_M0 BIT(2) +#define MHI_PM_M2 BIT(3) +#define MHI_PM_M3_ENTER BIT(4) +#define MHI_PM_M3 BIT(5) +#define MHI_PM_M3_EXIT BIT(6) +/* firmware download failure state */ +#define MHI_PM_FW_DL_ERR BIT(7) +#define MHI_PM_SYS_ERR_DETECT BIT(8) +#define MHI_PM_SYS_ERR_PROCESS BIT(9) +#define MHI_PM_SHUTDOWN_PROCESS BIT(10) +/* link not accessible */ +#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11) + +#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ + MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) +#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) +#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) +#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \ + mhi_cntrl->db_access) +#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_EXIT)) +#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2) +#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state) +#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ + MHI_PM_IN_ERROR_STATE(pm_state)) +#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ + (MHI_PM_M3_ENTER | MHI_PM_M3)) + +#define NR_OF_CMD_RINGS 1 +#define CMD_EL_PER_RING 128 +#define PRIMARY_CMD_RING 0 +#define MHI_DEV_WAKE_DB 127 +#define MHI_MAX_MTU 0xffff +#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1) + +enum mhi_er_type { + MHI_ER_TYPE_INVALID = 0x0, + MHI_ER_TYPE_VALID = 0x1, +}; + +struct db_cfg { + bool reset_req; + bool db_mode; + u32 pollcfg; + enum mhi_db_brst_mode brstmode; + dma_addr_t db_val; + void (*process_db)(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, void __iomem *io_addr, + dma_addr_t db_val); +}; + +struct mhi_pm_transitions { + enum mhi_pm_state from_state; + u32 to_states; +}; + +struct state_transition { + struct list_head node; + enum dev_st_transition state; +}; + +struct mhi_ring { + dma_addr_t dma_handle; + dma_addr_t iommu_base; + __le64 *ctxt_wp; /* point to ctxt wp */ + void *pre_aligned; + void *base; + void *rp; + void *wp; + size_t el_size; + size_t len; + size_t elements; + size_t alloc_size; + void __iomem *db_addr; +}; + +struct mhi_cmd { + struct mhi_ring ring; + spinlock_t lock; +}; + +struct mhi_buf_info { + void *v_addr; + void *bb_addr; + void *wp; + void *cb_buf; + dma_addr_t p_addr; + size_t len; + enum dma_data_direction dir; + bool used; /* Indicates whether the buffer is used or not */ + bool pre_mapped; /* Already pre-mapped by client */ +}; + +struct mhi_event { + struct mhi_controller *mhi_cntrl; + struct mhi_chan *mhi_chan; /* dedicated to channel */ + u32 er_index; + u32 intmod; + u32 irq; + int chan; /* this event ring is dedicated to a channel (optional) */ + u32 priority; + enum mhi_er_data_type data_type; + struct mhi_ring ring; + struct db_cfg db_cfg; + struct tasklet_struct task; + spinlock_t lock; + int (*process_event)(struct 
mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota); + bool hw_ring; + bool cl_manage; + bool offload_ev; /* managed by a device driver */ +}; + +struct mhi_chan { + const char *name; + /* + * Important: When consuming, increment tre_ring first and when + * releasing, decrement buf_ring first. If tre_ring has space, buf_ring + * is guranteed to have space so we do not need to check both rings. + */ + struct mhi_ring buf_ring; + struct mhi_ring tre_ring; + u32 chan; + u32 er_index; + u32 intmod; + enum mhi_ch_type type; + enum dma_data_direction dir; + struct db_cfg db_cfg; + enum mhi_ch_ee_mask ee_mask; + enum mhi_ch_state ch_state; + enum mhi_ev_ccs ccs; + struct mhi_device *mhi_dev; + void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result); + struct mutex mutex; + struct completion completion; + rwlock_t lock; + struct list_head node; + bool lpm_notify; + bool configured; + bool offload_ch; + bool pre_alloc; + bool wake_capable; +}; + +/* Default MHI timeout */ +#define MHI_TIMEOUT_MS (1000) + +/* debugfs related functions */ +#ifdef CONFIG_MHI_BUS_DEBUG +void mhi_create_debugfs(struct mhi_controller *mhi_cntrl); +void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl); +void mhi_debugfs_init(void); +void mhi_debugfs_exit(void); +#else +static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl) +{ +} + +static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl) +{ +} + +static inline void mhi_debugfs_init(void) +{ +} + +static inline void mhi_debugfs_exit(void) +{ +} +#endif + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); + +int mhi_destroy_device(struct device *dev, void *data); +void mhi_create_devices(struct mhi_controller *mhi_cntrl); + +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, size_t alloc_size); +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info *image_info); + +/* Power management APIs */ +enum mhi_pm_state __must_check mhi_tryset_pm_state( + struct mhi_controller *mhi_cntrl, + enum mhi_pm_state state); +const char *to_mhi_pm_state_str(u32 state); +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum dev_st_transition state); +void mhi_pm_st_worker(struct work_struct *work); +void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl); +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + enum mhi_cmd_type cmd); +int mhi_download_amss_image(struct mhi_controller *mhi_cntrl); +static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl) +{ + return (mhi_cntrl->dev_state >= MHI_STATE_M0 && + mhi_cntrl->dev_state <= MHI_STATE_M3_FAST); +} + +static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl) +{ + pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); + mhi_cntrl->runtime_get(mhi_cntrl); + mhi_cntrl->runtime_put(mhi_cntrl); +} + +/* Register access methods */ +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, + void __iomem *db_addr, dma_addr_t db_val); +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_mode, void __iomem *db_addr, + dma_addr_t db_val); +int __must_check 
mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 *out); +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 shift, u32 *out); +int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 shift, u32 val, u32 delayus); +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val); +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 mask, u32 shift, u32 val); +void mhi_ring_er_db(struct mhi_event *mhi_event); +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t db_val); +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); + +/* Initialization methods */ +int mhi_init_mmio(struct mhi_controller *mhi_cntrl); +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); +void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info); +void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); + +/* Automatically allocate and queue inbound buffers */ +#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0) +int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, unsigned int flags); + +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); + +/* Event processing methods */ +void mhi_ctrl_ev_task(unsigned long data); +void mhi_ev_task(unsigned long data); +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); + +/* ISR handlers */ +irqreturn_t mhi_irq_handler(int irq_number, void *dev); +irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev); +irqreturn_t mhi_intvec_handler(int irq_number, void *dev); + +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + struct mhi_buf_info *info, enum mhi_flags flags); +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); + +#endif /* _MHI_INT_H */ diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c new file mode 100644 index 000000000000..85f4f7c8d7c6 --- /dev/null +++ b/drivers/bus/mhi/host/main.c @@ -0,0 +1,1687 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 *out) +{ + return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out); +} + +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, + u32 mask, u32 shift, u32 *out) +{ + u32 tmp; + int ret; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return ret; + + *out = (tmp & mask) >> shift; + + return 0; +} + +int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, + u32 mask, u32 shift, u32 val, u32 delayus) +{ + int ret; + u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus; + + while (retry--) { + ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, shift, + &out); + if (ret) + return ret; + + if (out == val) + return 0; + + fsleep(delayus); + } + + return -ETIMEDOUT; +} + +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val) +{ + mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); +} + +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 mask, u32 shift, u32 val) +{ + int ret; + u32 tmp; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return; + + tmp &= ~mask; + tmp |= (val << shift); + mhi_write_reg(mhi_cntrl, base, offset, tmp); +} + +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t db_val) +{ + mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val)); + mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val)); +} + +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t db_val) +{ + if (db_cfg->db_mode) { + db_cfg->db_val = db_val; + mhi_write_db(mhi_cntrl, db_addr, db_val); + db_cfg->db_mode = 0; + } +} + +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t db_val) +{ + db_cfg->db_val = db_val; + mhi_write_db(mhi_cntrl, db_addr, db_val); +} + +void mhi_ring_er_db(struct mhi_event *mhi_event) +{ + struct mhi_ring *ring = &mhi_event->ring; + + mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, + ring->db_addr, le64_to_cpu(*ring->ctxt_wp)); +} + +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) +{ + dma_addr_t db; + struct mhi_ring *ring = &mhi_cmd->ring; + + db = ring->iommu_base + (ring->wp - ring->base); + *ring->ctxt_wp = cpu_to_le64(db); + mhi_write_db(mhi_cntrl, ring->db_addr, db); +} + +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *ring = &mhi_chan->tre_ring; + dma_addr_t db; + + db = ring->iommu_base + (ring->wp - ring->base); + + /* + * Writes to the new ring element must be visible to the hardware + * before letting h/w know there is new element to fetch. + */ + dma_wmb(); + *ring->ctxt_wp = cpu_to_le64(db); + + mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, + ring->db_addr, db); +} + +enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl) +{ + u32 exec; + int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); + + return (ret) ? 
MHI_EE_MAX : exec; +} +EXPORT_SYMBOL_GPL(mhi_get_exec_env); + +enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) +{ + u32 state; + int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, &state); + return ret ? MHI_STATE_MAX : state; +} +EXPORT_SYMBOL_GPL(mhi_get_mhi_state); + +void mhi_soc_reset(struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->reset) { + mhi_cntrl->reset(mhi_cntrl); + return; + } + + /* Generic MHI SoC reset */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET, + MHI_SOC_RESET_REQ); +} +EXPORT_SYMBOL_GPL(mhi_soc_reset); + +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev, + buf_info->v_addr, buf_info->len, + buf_info->dir); + if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr)) + return -ENOMEM; + + return 0; +} + +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len, + &buf_info->p_addr, GFP_ATOMIC); + + if (!buf) + return -ENOMEM; + + if (buf_info->dir == DMA_TO_DEVICE) + memcpy(buf, buf_info->v_addr, buf_info->len); + + buf_info->bb_addr = buf; + + return 0; +} + +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len, + buf_info->dir); +} + +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + if (buf_info->dir == DMA_FROM_DEVICE) + memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); + + dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len, + buf_info->bb_addr, buf_info->p_addr); +} + +static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + int nr_el; + + if (ring->wp < ring->rp) { + nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; + } else { + nr_el = (ring->rp - ring->base) / ring->el_size; + nr_el += ((ring->base + ring->len - ring->wp) / + ring->el_size) - 1; + } + + return nr_el; +} + +static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) +{ + return (addr - ring->iommu_base) + ring->base; +} + +static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + ring->wp += ring->el_size; + if (ring->wp >= (ring->base + ring->len)) + ring->wp = ring->base; + /* smp update */ + smp_wmb(); +} + +static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + ring->rp += ring->el_size; + if (ring->rp >= (ring->base + ring->len)) + ring->rp = ring->base; + /* smp update */ + smp_wmb(); +} + +static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr) +{ + return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len; +} + +int mhi_destroy_device(struct device *dev, void *data) +{ + struct mhi_chan *ul_chan, *dl_chan; + struct mhi_device *mhi_dev; + struct mhi_controller *mhi_cntrl; + enum mhi_ee_type ee = MHI_EE_MAX; + + if (dev->bus != &mhi_bus_type) + return 0; + + mhi_dev = to_mhi_device(dev); + mhi_cntrl = mhi_dev->mhi_cntrl; + + /* Only destroy virtual devices thats attached to bus */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + ul_chan = mhi_dev->ul_chan; + dl_chan = mhi_dev->dl_chan; + + /* + * If execution environment is specified, remove only those devices that + * started in them based on ee_mask 
for the channels as we move on to a + * different execution environment + */ + if (data) + ee = *(enum mhi_ee_type *)data; + + /* + * For the suspend and resume case, this function will get called + * without mhi_unregister_controller(). Hence, we need to drop the + * references to mhi_dev created for ul and dl channels. We can + * be sure that there will be no instances of mhi_dev left after + * this. + */ + if (ul_chan) { + if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee))) + return 0; + + put_device(&ul_chan->mhi_dev->dev); + } + + if (dl_chan) { + if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee))) + return 0; + + put_device(&dl_chan->mhi_dev->dev); + } + + dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n", + mhi_dev->name); + + /* Notify the client and remove the device from MHI bus */ + device_del(dev); + put_device(dev); + + return 0; +} + +int mhi_get_free_desc_count(struct mhi_device *mhi_dev, + enum dma_data_direction dir) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? + mhi_dev->ul_chan : mhi_dev->dl_chan; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + return get_nr_avail_ring_elements(mhi_cntrl, tre_ring); +} +EXPORT_SYMBOL_GPL(mhi_get_free_desc_count); + +void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason) +{ + struct mhi_driver *mhi_drv; + + if (!mhi_dev->dev.driver) + return; + + mhi_drv = to_mhi_driver(mhi_dev->dev.driver); + + if (mhi_drv->status_cb) + mhi_drv->status_cb(mhi_dev, cb_reason); +} +EXPORT_SYMBOL_GPL(mhi_notify); + +/* Bind MHI channels to MHI devices */ +void mhi_create_devices(struct mhi_controller *mhi_cntrl) +{ + struct mhi_chan *mhi_chan; + struct mhi_device *mhi_dev; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int i, ret; + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + if (!mhi_chan->configured || mhi_chan->mhi_dev || + !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) + continue; + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (IS_ERR(mhi_dev)) + return; + + mhi_dev->dev_type = MHI_DEVICE_XFER; + switch (mhi_chan->dir) { + case DMA_TO_DEVICE: + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + break; + case DMA_FROM_DEVICE: + /* We use dl_chan as offload channels */ + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + break; + default: + dev_err(dev, "Direction not supported\n"); + put_device(&mhi_dev->dev); + return; + } + + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + + /* Check next channel if it matches */ + if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { + if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { + i++; + mhi_chan++; + if (mhi_chan->dir == DMA_TO_DEVICE) { + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + } else { + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + } + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + } + } + + /* Channel name is same for both UL and DL */ + mhi_dev->name = mhi_chan->name; + dev_set_name(&mhi_dev->dev, "%s_%s", + dev_name(&mhi_cntrl->mhi_dev->dev), + mhi_dev->name); + + /* Init wakeup source if available */ + if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable) + device_init_wakeup(&mhi_dev->dev, true); + + ret = device_add(&mhi_dev->dev); + if (ret) + put_device(&mhi_dev->dev); + } +} + +irqreturn_t mhi_irq_handler(int irq_number, void *dev) +{ + struct mhi_event *mhi_event = dev; + struct mhi_controller *mhi_cntrl = 
mhi_event->mhi_cntrl; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_ring *ev_ring = &mhi_event->ring; + dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); + void *dev_rp; + + if (!is_valid_ring_ptr(ev_ring, ptr)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event ring rp points outside of the event ring\n"); + return IRQ_HANDLED; + } + + dev_rp = mhi_to_virtual(ev_ring, ptr); + + /* Only proceed if event ring has pending events */ + if (ev_ring->rp == dev_rp) + return IRQ_HANDLED; + + /* For client managed event ring, notify pending data */ + if (mhi_event->cl_manage) { + struct mhi_chan *mhi_chan = mhi_event->mhi_chan; + struct mhi_device *mhi_dev = mhi_chan->mhi_dev; + + if (mhi_dev) + mhi_notify(mhi_dev, MHI_CB_PENDING_DATA); + } else { + tasklet_schedule(&mhi_event->task); + } + + return IRQ_HANDLED; +} + +irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv) +{ + struct mhi_controller *mhi_cntrl = priv; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + enum mhi_pm_state pm_state = 0; + enum mhi_ee_type ee; + + write_lock_irq(&mhi_cntrl->pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + write_unlock_irq(&mhi_cntrl->pm_lock); + goto exit_intvec; + } + + state = mhi_get_mhi_state(mhi_cntrl); + ee = mhi_get_exec_env(mhi_cntrl); + dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(ee), TO_MHI_STATE_STR(state)); + + if (state == MHI_STATE_SYS_ERR) { + dev_dbg(dev, "System error detected\n"); + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + } + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee) + goto exit_intvec; + + switch (ee) { + case MHI_EE_RDDM: + /* proceed if power down is not already in progress */ + if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) { + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); + mhi_cntrl->ee = ee; + wake_up_all(&mhi_cntrl->state_event); + } + break; + case MHI_EE_PBL: + case MHI_EE_EDL: + case MHI_EE_PTHRU: + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR); + mhi_cntrl->ee = ee; + wake_up_all(&mhi_cntrl->state_event); + mhi_pm_sys_err_handler(mhi_cntrl); + break; + default: + wake_up_all(&mhi_cntrl->state_event); + mhi_pm_sys_err_handler(mhi_cntrl); + break; + } + +exit_intvec: + + return IRQ_HANDLED; +} + +irqreturn_t mhi_intvec_handler(int irq_number, void *dev) +{ + struct mhi_controller *mhi_cntrl = dev; + + /* Wake up events waiting for state change */ + wake_up_all(&mhi_cntrl->state_event); + + return IRQ_WAKE_THREAD; +} + +static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + dma_addr_t ctxt_wp; + + /* Update the WP */ + ring->wp += ring->el_size; + ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size; + + if (ring->wp >= (ring->base + ring->len)) { + ring->wp = ring->base; + ctxt_wp = ring->iommu_base; + } + + *ring->ctxt_wp = cpu_to_le64(ctxt_wp); + + /* Update the RP */ + ring->rp += ring->el_size; + if (ring->rp >= (ring->base + ring->len)) + ring->rp = ring->base; + + /* Update to all cores */ + smp_wmb(); +} + +static int parse_xfer_event(struct mhi_controller *mhi_cntrl, + struct mhi_tre *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_result result; + unsigned long flags = 0; + u32 ev_code; + + ev_code = 
MHI_TRE_GET_EV_CODE(event); + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + + /* + * If it's a DB Event then we need to grab the lock + * with preemption disabled and as a write because we + * have to update db register and there are chances that + * another thread could be doing the same. + */ + if (ev_code >= MHI_EV_CC_OOB) + write_lock_irqsave(&mhi_chan->lock, flags); + else + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_tx_event; + + switch (ev_code) { + case MHI_EV_CC_OVERFLOW: + case MHI_EV_CC_EOB: + case MHI_EV_CC_EOT: + { + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); + struct mhi_tre *local_rp, *ev_tre; + void *dev_rp; + struct mhi_buf_info *buf_info; + u16 xfer_len; + + if (!is_valid_ring_ptr(tre_ring, ptr)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event element points outside of the tre ring\n"); + break; + } + /* Get the TRB this event points to */ + ev_tre = mhi_to_virtual(tre_ring, ptr); + + dev_rp = ev_tre + 1; + if (dev_rp >= (tre_ring->base + tre_ring->len)) + dev_rp = tre_ring->base; + + result.dir = mhi_chan->dir; + + local_rp = tre_ring->rp; + while (local_rp != dev_rp) { + buf_info = buf_ring->rp; + /* If it's the last TRE, get length from the event */ + if (local_rp == ev_tre) + xfer_len = MHI_TRE_GET_EV_LEN(event); + else + xfer_len = buf_info->len; + + /* Unmap if it's not pre-mapped by client */ + if (likely(!buf_info->pre_mapped)) + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + + result.buf_addr = buf_info->cb_buf; + + /* truncate to buf len if xfer_len is larger */ + result.bytes_xferd = + min_t(u16, xfer_len, buf_info->len); + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + local_rp = tre_ring->rp; + + /* notify client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + if (mhi_chan->dir == DMA_TO_DEVICE) { + atomic_dec(&mhi_cntrl->pending_pkts); + /* Release the reference got from mhi_queue() */ + mhi_cntrl->runtime_put(mhi_cntrl); + } + + /* + * Recycle the buffer if buffer is pre-allocated, + * if there is an error, not much we can do apart + * from dropping the packet + */ + if (mhi_chan->pre_alloc) { + if (mhi_queue_buf(mhi_chan->mhi_dev, + mhi_chan->dir, + buf_info->cb_buf, + buf_info->len, MHI_EOT)) { + dev_err(dev, + "Error recycling buffer for chan:%d\n", + mhi_chan->chan); + kfree(buf_info->cb_buf); + } + } + } + break; + } /* CC_EOT */ + case MHI_EV_CC_OOB: + case MHI_EV_CC_DB_MODE: + { + unsigned long pm_lock_flags; + + mhi_chan->db_cfg.db_mode = 1; + read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags); + if (tre_ring->wp != tre_ring->rp && + MHI_DB_ACCESS_VALID(mhi_cntrl)) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags); + break; + } + case MHI_EV_CC_BAD_TRE: + default: + dev_err(dev, "Unknown event 0x%x\n", ev_code); + break; + } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */ + +end_process_tx_event: + if (ev_code >= MHI_EV_CC_OOB) + write_unlock_irqrestore(&mhi_chan->lock, flags); + else + read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +static int parse_rsc_event(struct mhi_controller *mhi_cntrl, + struct mhi_tre *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_buf_info *buf_info; + struct mhi_result result; + int ev_code; + u32 cookie; /* offset to local descriptor */ + u16 xfer_len; + + buf_ring = &mhi_chan->buf_ring; + 
tre_ring = &mhi_chan->tre_ring; + + ev_code = MHI_TRE_GET_EV_CODE(event); + cookie = MHI_TRE_GET_EV_COOKIE(event); + xfer_len = MHI_TRE_GET_EV_LEN(event); + + /* Received out of bound cookie */ + WARN_ON(cookie >= buf_ring->len); + + buf_info = buf_ring->base + cookie; + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + + /* truncate to buf len if xfer_len is larger */ + result.bytes_xferd = min_t(u16, xfer_len, buf_info->len); + result.buf_addr = buf_info->cb_buf; + result.dir = mhi_chan->dir; + + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_rsc_event; + + WARN_ON(!buf_info->used); + + /* notify the client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* + * Note: We're arbitrarily incrementing RP even though, completion + * packet we processed might not be the same one, reason we can do this + * is because device guaranteed to cache descriptors in order it + * receive, so even though completion event is different we can re-use + * all descriptors in between. + * Example: + * Transfer Ring has descriptors: A, B, C, D + * Last descriptor host queue is D (WP) and first descriptor + * host queue is A (RP). + * The completion event we just serviced is descriptor C. + * Then we can safely queue descriptors to replace A, B, and C + * even though host did not receive any completions. + */ + mhi_del_ring_element(mhi_cntrl, tre_ring); + buf_info->used = false; + +end_process_rsc_event: + read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, + struct mhi_tre *tre) +{ + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); + struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *mhi_ring = &cmd_ring->ring; + struct mhi_tre *cmd_pkt; + struct mhi_chan *mhi_chan; + u32 chan; + + if (!is_valid_ring_ptr(mhi_ring, ptr)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event element points outside of the cmd ring\n"); + return; + } + + cmd_pkt = mhi_to_virtual(mhi_ring, ptr); + + chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); + + if (chan < mhi_cntrl->max_chan && + mhi_cntrl->mhi_chan[chan].configured) { + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + write_lock_bh(&mhi_chan->lock); + mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_chan->completion); + write_unlock_bh(&mhi_chan->lock); + } else { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Completion packet for invalid channel ID: %d\n", chan); + } + + mhi_del_ring_element(mhi_cntrl, mhi_ring); +} + +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_chan *mhi_chan; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 chan; + int count = 0; + dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); + + /* + * This is a quick check to avoid unnecessary event processing + * in case MHI is already in error state, but it's still possible + * to transition to error state while processing events + */ + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) + return -EIO; + + if (!is_valid_ring_ptr(ev_ring, ptr)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event ring rp points outside of the event ring\n"); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, ptr); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp) { + enum mhi_pkt_type type = 
MHI_TRE_GET_EV_TYPE(local_rp); + + switch (type) { + case MHI_PKT_TYPE_BW_REQ_EVENT: + { + struct mhi_link_info *link_info; + + link_info = &mhi_cntrl->mhi_link_info; + write_lock_irq(&mhi_cntrl->pm_lock); + link_info->target_link_speed = + MHI_TRE_GET_EV_LINKSPEED(local_rp); + link_info->target_link_width = + MHI_TRE_GET_EV_LINKWIDTH(local_rp); + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_dbg(dev, "Received BW_REQ event\n"); + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ); + break; + } + case MHI_PKT_TYPE_STATE_CHANGE_EVENT: + { + enum mhi_state new_state; + + new_state = MHI_TRE_GET_EV_STATE(local_rp); + + dev_dbg(dev, "State change event to state: %s\n", + TO_MHI_STATE_STR(new_state)); + + switch (new_state) { + case MHI_STATE_M0: + mhi_pm_m0_transition(mhi_cntrl); + break; + case MHI_STATE_M1: + mhi_pm_m1_transition(mhi_cntrl); + break; + case MHI_STATE_M3: + mhi_pm_m3_transition(mhi_cntrl); + break; + case MHI_STATE_SYS_ERR: + { + enum mhi_pm_state pm_state; + + dev_dbg(dev, "System error detected\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (pm_state == MHI_PM_SYS_ERR_DETECT) + mhi_pm_sys_err_handler(mhi_cntrl); + break; + } + default: + dev_err(dev, "Invalid state: %s\n", + TO_MHI_STATE_STR(new_state)); + } + + break; + } + case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: + mhi_process_cmd_completion(mhi_cntrl, local_rp); + break; + case MHI_PKT_TYPE_EE_EVENT: + { + enum dev_st_transition st = DEV_ST_TRANSITION_MAX; + enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp); + + dev_dbg(dev, "Received EE event: %s\n", + TO_MHI_EXEC_STR(event)); + switch (event) { + case MHI_EE_SBL: + st = DEV_ST_TRANSITION_SBL; + break; + case MHI_EE_WFW: + case MHI_EE_AMSS: + st = DEV_ST_TRANSITION_MISSION_MODE; + break; + case MHI_EE_FP: + st = DEV_ST_TRANSITION_FP; + break; + case MHI_EE_RDDM: + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = event; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + break; + default: + dev_err(dev, + "Unhandled EE event: 0x%x\n", type); + } + if (st != DEV_ST_TRANSITION_MAX) + mhi_queue_state_transition(mhi_cntrl, st); + + break; + } + case MHI_PKT_TYPE_TX_EVENT: + chan = MHI_TRE_GET_EV_CHID(local_rp); + + WARN_ON(chan >= mhi_cntrl->max_chan); + + /* + * Only process the event ring elements whose channel + * ID is within the maximum supported range. 
+ */ + if (chan < mhi_cntrl->max_chan) { + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + if (!mhi_chan->configured) + break; + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } + break; + default: + dev_err(dev, "Unhandled event type: %d\n", type); + break; + } + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + + ptr = le64_to_cpu(er_ctxt->rp); + if (!is_valid_ring_ptr(ev_ring, ptr)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event ring rp points outside of the event ring\n"); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, ptr); + count++; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return count; +} + +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + int count = 0; + u32 chan; + struct mhi_chan *mhi_chan; + dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); + + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) + return -EIO; + + if (!is_valid_ring_ptr(ev_ring, ptr)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event ring rp points outside of the event ring\n"); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, ptr); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp && event_quota > 0) { + enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); + + chan = MHI_TRE_GET_EV_CHID(local_rp); + + WARN_ON(chan >= mhi_cntrl->max_chan); + + /* + * Only process the event ring elements whose channel + * ID is within the maximum supported range. + */ + if (chan < mhi_cntrl->max_chan && + mhi_cntrl->mhi_chan[chan].configured) { + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + + if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) { + parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } + } + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + + ptr = le64_to_cpu(er_ctxt->rp); + if (!is_valid_ring_ptr(ev_ring, ptr)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event ring rp points outside of the event ring\n"); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, ptr); + count++; + } + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return count; +} + +void mhi_ev_task(unsigned long data) +{ + struct mhi_event *mhi_event = (struct mhi_event *)data; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + + /* process all pending events */ + spin_lock_bh(&mhi_event->lock); + mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); + spin_unlock_bh(&mhi_event->lock); +} + +void mhi_ctrl_ev_task(unsigned long data) +{ + struct mhi_event *mhi_event = (struct mhi_event *)data; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + enum mhi_pm_state pm_state = 0; + int ret; + + /* + * We can check PM state w/o a lock here because there is no way + * PM state can change from reg access valid to no access while this + * thread being executed. 
+ */ + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + /* + * We may have a pending event but not allowed to + * process it since we are probably in a suspended state, + * so trigger a resume. + */ + mhi_trigger_resume(mhi_cntrl); + + return; + } + + /* Process ctrl events */ + ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); + + /* + * We received an IRQ but no events to process, maybe device went to + * SYS_ERR state? Check the state to confirm. + */ + if (!ret) { + write_lock_irq(&mhi_cntrl->pm_lock); + state = mhi_get_mhi_state(mhi_cntrl); + if (state == MHI_STATE_SYS_ERR) { + dev_dbg(dev, "System error detected\n"); + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + } + write_unlock_irq(&mhi_cntrl->pm_lock); + if (pm_state == MHI_PM_SYS_ERR_DETECT) + mhi_pm_sys_err_handler(mhi_cntrl); + } +} + +static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *tmp = ring->wp + ring->el_size; + + if (tmp >= (ring->base + ring->len)) + tmp = ring->base; + + return (tmp == ring->rp); +} + +static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, + enum dma_data_direction dir, enum mhi_flags mflags) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : + mhi_dev->dl_chan; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + unsigned long flags; + int ret; + + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) + return -EIO; + + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + + ret = mhi_is_ring_full(mhi_cntrl, tre_ring); + if (unlikely(ret)) { + ret = -EAGAIN; + goto exit_unlock; + } + + ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags); + if (unlikely(ret)) + goto exit_unlock; + + /* Packet is queued, take a usage ref to exit M3 if necessary + * for host->device buffer, balanced put is done on buffer completion + * for device->host buffer, balanced put is after ringing the DB + */ + mhi_cntrl->runtime_get(mhi_cntrl); + + /* Assert dev_wake (to exit/prevent M1/M2)*/ + mhi_cntrl->wake_toggle(mhi_cntrl); + + if (mhi_chan->dir == DMA_TO_DEVICE) + atomic_inc(&mhi_cntrl->pending_pkts); + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + + if (dir == DMA_FROM_DEVICE) + mhi_cntrl->runtime_put(mhi_cntrl); + +exit_unlock: + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + + return ret; +} + +int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, + struct sk_buff *skb, size_t len, enum mhi_flags mflags) +{ + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : + mhi_dev->dl_chan; + struct mhi_buf_info buf_info = { }; + + buf_info.v_addr = skb->data; + buf_info.cb_buf = skb; + buf_info.len = len; + + if (unlikely(mhi_chan->pre_alloc)) + return -EINVAL; + + return mhi_queue(mhi_dev, &buf_info, dir, mflags); +} +EXPORT_SYMBOL_GPL(mhi_queue_skb); + +int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, + struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags) +{ + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
mhi_dev->ul_chan : + mhi_dev->dl_chan; + struct mhi_buf_info buf_info = { }; + + buf_info.p_addr = mhi_buf->dma_addr; + buf_info.cb_buf = mhi_buf; + buf_info.pre_mapped = true; + buf_info.len = len; + + if (unlikely(mhi_chan->pre_alloc)) + return -EINVAL; + + return mhi_queue(mhi_dev, &buf_info, dir, mflags); +} +EXPORT_SYMBOL_GPL(mhi_queue_dma); + +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + struct mhi_buf_info *info, enum mhi_flags flags) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_tre *mhi_tre; + struct mhi_buf_info *buf_info; + int eot, eob, chain, bei; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + buf_info = buf_ring->wp; + WARN_ON(buf_info->used); + buf_info->pre_mapped = info->pre_mapped; + if (info->pre_mapped) + buf_info->p_addr = info->p_addr; + else + buf_info->v_addr = info->v_addr; + buf_info->cb_buf = info->cb_buf; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = info->len; + + if (!info->pre_mapped) { + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) + return ret; + } + + eob = !!(flags & MHI_EOB); + eot = !!(flags & MHI_EOT); + chain = !!(flags & MHI_CHAIN); + bei = !!(mhi_chan->intmod); + + mhi_tre = tre_ring->wp; + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); + + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + return 0; +} + +int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, + void *buf, size_t len, enum mhi_flags mflags) +{ + struct mhi_buf_info buf_info = { }; + + buf_info.v_addr = buf; + buf_info.cb_buf = buf; + buf_info.len = len; + + return mhi_queue(mhi_dev, &buf_info, dir, mflags); +} +EXPORT_SYMBOL_GPL(mhi_queue_buf); + +bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
+ mhi_dev->ul_chan : mhi_dev->dl_chan; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + return mhi_is_ring_full(mhi_cntrl, tre_ring); +} +EXPORT_SYMBOL_GPL(mhi_queue_is_full); + +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + enum mhi_cmd_type cmd) +{ + struct mhi_tre *cmd_tre = NULL; + struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *ring = &mhi_cmd->ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int chan = 0; + + if (mhi_chan) + chan = mhi_chan->chan; + + spin_lock_bh(&mhi_cmd->lock); + if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { + spin_unlock_bh(&mhi_cmd->lock); + return -ENOMEM; + } + + /* prepare the cmd tre */ + cmd_tre = ring->wp; + switch (cmd) { + case MHI_CMD_RESET_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); + break; + case MHI_CMD_STOP_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan); + break; + case MHI_CMD_START_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_START_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); + break; + default: + dev_err(dev, "Command not supported\n"); + break; + } + + /* queue to hardware */ + mhi_add_ring_element(mhi_cntrl, ring); + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + read_unlock_bh(&mhi_cntrl->pm_lock); + spin_unlock_bh(&mhi_cmd->lock); + + return 0; +} + +static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + enum mhi_ch_state_type to_state) +{ + struct device *dev = &mhi_chan->mhi_dev->dev; + enum mhi_cmd_type cmd = MHI_CMD_NOP; + int ret; + + dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan, + TO_CH_STATE_TYPE_STR(to_state)); + + switch (to_state) { + case MHI_CH_STATE_TYPE_RESET: + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->ch_state != MHI_CH_STATE_STOP && + mhi_chan->ch_state != MHI_CH_STATE_ENABLED && + mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) { + write_unlock_irq(&mhi_chan->lock); + return -EINVAL; + } + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + write_unlock_irq(&mhi_chan->lock); + + cmd = MHI_CMD_RESET_CHAN; + break; + case MHI_CH_STATE_TYPE_STOP: + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + return -EINVAL; + + cmd = MHI_CMD_STOP_CHAN; + break; + case MHI_CH_STATE_TYPE_START: + if (mhi_chan->ch_state != MHI_CH_STATE_STOP && + mhi_chan->ch_state != MHI_CH_STATE_DISABLED) + return -EINVAL; + + cmd = MHI_CMD_START_CHAN; + break; + default: + dev_err(dev, "%d: Channel state update to %s not allowed\n", + mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); + return -EINVAL; + } + + /* bring host and device out of suspended states */ + ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); + if (ret) + return ret; + mhi_cntrl->runtime_get(mhi_cntrl); + + reinit_completion(&mhi_chan->completion); + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); + if (ret) { + dev_err(dev, "%d: Failed to send %s channel command\n", + mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); + goto exit_channel_update; + } + + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { + dev_err(dev, + "%d: Failed to receive %s channel command completion\n", + mhi_chan->chan, 
TO_CH_STATE_TYPE_STR(to_state)); + ret = -EIO; + goto exit_channel_update; + } + + ret = 0; + + if (to_state != MHI_CH_STATE_TYPE_RESET) { + write_lock_irq(&mhi_chan->lock); + mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ? + MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP; + write_unlock_irq(&mhi_chan->lock); + } + + dev_dbg(dev, "%d: Channel state change to %s successful\n", + mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); + +exit_channel_update: + mhi_cntrl->runtime_put(mhi_cntrl); + mhi_device_put(mhi_cntrl->mhi_dev); + + return ret; +} + +static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret; + struct device *dev = &mhi_chan->mhi_dev->dev; + + mutex_lock(&mhi_chan->mutex); + + if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { + dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); + goto exit_unprepare_channel; + } + + /* no more processing events for this channel */ + ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, + MHI_CH_STATE_TYPE_RESET); + if (ret) + dev_err(dev, "%d: Failed to reset channel, still resetting\n", + mhi_chan->chan); + +exit_unprepare_channel: + write_lock_irq(&mhi_chan->lock); + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + write_unlock_irq(&mhi_chan->lock); + + if (!mhi_chan->offload_ch) { + mhi_reset_chan(mhi_cntrl, mhi_chan); + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + } + dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan); + + mutex_unlock(&mhi_chan->mutex); +} + +int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, unsigned int flags) +{ + int ret = 0; + struct device *dev = &mhi_chan->mhi_dev->dev; + + if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { + dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); + return -ENOTCONN; + } + + mutex_lock(&mhi_chan->mutex); + + /* Check of client manages channel context for offload channels */ + if (!mhi_chan->offload_ch) { + ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); + if (ret) + goto error_init_chan; + } + + ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, + MHI_CH_STATE_TYPE_START); + if (ret) + goto error_pm_state; + + if (mhi_chan->dir == DMA_FROM_DEVICE) + mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS); + + /* Pre-allocate buffer for xfer ring */ + if (mhi_chan->pre_alloc) { + int nr_el = get_nr_avail_ring_elements(mhi_cntrl, + &mhi_chan->tre_ring); + size_t len = mhi_cntrl->buffer_len; + + while (nr_el--) { + void *buf; + struct mhi_buf_info info = { }; + + buf = kmalloc(len, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto error_pre_alloc; + } + + /* Prepare transfer descriptors */ + info.v_addr = buf; + info.cb_buf = buf; + info.len = len; + ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT); + if (ret) { + kfree(buf); + goto error_pre_alloc; + } + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_DB_ACCESS_VALID(mhi_cntrl)) { + read_lock_irq(&mhi_chan->lock); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irq(&mhi_chan->lock); + } + read_unlock_bh(&mhi_cntrl->pm_lock); + } + + mutex_unlock(&mhi_chan->mutex); + + return 0; + +error_pm_state: + if (!mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + +error_init_chan: + mutex_unlock(&mhi_chan->mutex); + + return ret; + +error_pre_alloc: + mutex_unlock(&mhi_chan->mutex); + mhi_unprepare_channel(mhi_cntrl, mhi_chan); + + return ret; +} + +static void mhi_mark_stale_events(struct mhi_controller 
*mhi_cntrl, + struct mhi_event *mhi_event, + struct mhi_event_ctxt *er_ctxt, + int chan) + +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + unsigned long flags; + dma_addr_t ptr; + + dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan); + + ev_ring = &mhi_event->ring; + + /* mark all stale events related to channel as STALE event */ + spin_lock_irqsave(&mhi_event->lock, flags); + + ptr = le64_to_cpu(er_ctxt->rp); + if (!is_valid_ring_ptr(ev_ring, ptr)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event ring rp points outside of the event ring\n"); + dev_rp = ev_ring->rp; + } else { + dev_rp = mhi_to_virtual(ev_ring, ptr); + } + + local_rp = ev_ring->rp; + while (dev_rp != local_rp) { + if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT && + chan == MHI_TRE_GET_EV_CHID(local_rp)) + local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, + MHI_PKT_TYPE_STALE_EVENT); + local_rp++; + if (local_rp == (ev_ring->base + ev_ring->len)) + local_rp = ev_ring->base; + } + + dev_dbg(dev, "Finished marking events as stale events\n"); + spin_unlock_irqrestore(&mhi_event->lock, flags); +} + +static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_result result; + + /* Reset any pending buffers */ + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + while (tre_ring->rp != tre_ring->wp) { + struct mhi_buf_info *buf_info = buf_ring->rp; + + if (mhi_chan->dir == DMA_TO_DEVICE) { + atomic_dec(&mhi_cntrl->pending_pkts); + /* Release the reference got from mhi_queue() */ + mhi_cntrl->runtime_put(mhi_cntrl); + } + + if (!buf_info->pre_mapped) + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + + if (mhi_chan->pre_alloc) { + kfree(buf_info->cb_buf); + } else { + result.buf_addr = buf_info->cb_buf; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + } +} + +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) +{ + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + int chan = mhi_chan->chan; + + /* Nothing to reset, client doesn't queue buffers */ + if (mhi_chan->offload_ch) + return; + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; + + mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan); + + mhi_reset_data_chan(mhi_cntrl, mhi_chan); + + read_unlock_bh(&mhi_cntrl->pm_lock); +} + +static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags) +{ + int ret, dir; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; + if (!mhi_chan) + continue; + + ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags); + if (ret) + goto error_open_chan; + } + + return 0; + +error_open_chan: + for (--dir; dir >= 0; dir--) { + mhi_chan = dir ? 
mhi_dev->dl_chan : mhi_dev->ul_chan; + if (!mhi_chan) + continue; + + mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } + + return ret; +} + +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) +{ + return __mhi_prepare_for_transfer(mhi_dev, 0); +} +EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer); + +int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev) +{ + return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS); +} +EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue); + +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + int dir; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + if (!mhi_chan) + continue; + + mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } +} +EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer); + +int mhi_poll(struct mhi_device *mhi_dev, u32 budget) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = mhi_dev->dl_chan; + struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + int ret; + + spin_lock_bh(&mhi_event->lock); + ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); + spin_unlock_bh(&mhi_event->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_poll); diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c new file mode 100644 index 000000000000..9527b7d63840 --- /dev/null +++ b/drivers/bus/mhi/host/pci_generic.c @@ -0,0 +1,1105 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * MHI PCI driver - MHI over PCI controller driver + * + * This module is a generic driver for registering MHI-over-PCI devices, + * such as PCIe QCOM modems. + * + * Copyright (C) 2020 Linaro Ltd + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MHI_PCI_DEFAULT_BAR_NUM 0 + +#define MHI_POST_RESET_DELAY_MS 2000 + +#define HEALTH_CHECK_PERIOD (HZ * 2) + +/** + * struct mhi_pci_dev_info - MHI PCI device specific information + * @config: MHI controller configuration + * @name: name of the PCI module + * @fw: firmware path (if any) + * @edl: emergency download mode firmware path (if any) + * @bar_num: PCI base address register to use for MHI MMIO register space + * @dma_data_width: DMA transfer word size (32 or 64 bits) + * @mru_default: default MRU size for MBIM network packets + * @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead + * of inband wake support (such as sdx24) + */ +struct mhi_pci_dev_info { + const struct mhi_controller_config *config; + const char *name; + const char *fw; + const char *edl; + unsigned int bar_num; + unsigned int dma_data_width; + unsigned int mru_default; + bool sideband_wake; +}; + +#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_TO_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } \ + +#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_FROM_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = 
false, \ + } + +#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_FROM_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + .auto_queue = true, \ + } + +#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \ + { \ + .num_elements = el_count, \ + .irq_moderation_ms = 0, \ + .irq = (ev_ring) + 1, \ + .priority = 1, \ + .mode = MHI_DB_BRST_DISABLE, \ + .data_type = MHI_ER_CTRL, \ + .hardware_event = false, \ + .client_managed = false, \ + .offload_channel = false, \ + } + +#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_TO_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_ENABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = true, \ + } \ + +#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_FROM_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_ENABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = true, \ + } + +#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_TO_DEVICE, \ + .ee_mask = BIT(MHI_EE_SBL), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } \ + +#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_FROM_DEVICE, \ + .ee_mask = BIT(MHI_EE_SBL), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } + +#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_TO_DEVICE, \ + .ee_mask = BIT(MHI_EE_FP), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } \ + +#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_FROM_DEVICE, \ + .ee_mask = BIT(MHI_EE_FP), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } + +#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \ + { \ + .num_elements = el_count, \ + .irq_moderation_ms = 5, \ + .irq = (ev_ring) + 1, \ + .priority = 1, \ + .mode = MHI_DB_BRST_DISABLE, \ + .data_type = MHI_ER_DATA, \ + .hardware_event = false, \ + .client_managed = false, \ + .offload_channel = false, \ + } + +#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \ + { \ + .num_elements = el_count, \ + .irq_moderation_ms = 1, \ + .irq = (ev_ring) + 1, \ + .priority = 1, \ 
+ .mode = MHI_DB_BRST_DISABLE, \ + .data_type = MHI_ER_DATA, \ + .hardware_event = true, \ + .client_managed = false, \ + .offload_channel = false, \ + .channel = ch_num, \ + } + +static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = { + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0), + MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0), + MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0), + MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0), + MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0), + MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), + MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3), +}; + +static struct mhi_event_config modem_qcom_v1_mhi_events[] = { + /* first ring is control+data ring */ + MHI_EVENT_CONFIG_CTRL(0, 64), + /* DIAG dedicated event ring */ + MHI_EVENT_CONFIG_DATA(1, 128), + /* Hardware channels request dedicated hardware event rings */ + MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101) +}; + +static const struct mhi_controller_config modem_qcom_v1_mhiv_config = { + .max_channels = 128, + .timeout_ms = 8000, + .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels), + .ch_cfg = modem_qcom_v1_mhi_channels, + .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events), + .event_cfg = modem_qcom_v1_mhi_events, +}; + +static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = { + .name = "qcom-sdx65m", + .fw = "qcom/sdx65m/xbl.elf", + .edl = "qcom/sdx65m/edl.mbn", + .config = &modem_qcom_v1_mhiv_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, +}; + +static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = { + .name = "qcom-sdx55m", + .fw = "qcom/sdx55m/sbl1.mbn", + .edl = "qcom/sdx55m/edl.mbn", + .config = &modem_qcom_v1_mhiv_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = { + .name = "qcom-sdx24", + .edl = "qcom/prog_firehose_sdx24.mbn", + .config = &modem_qcom_v1_mhiv_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = true, +}; + +static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = { + MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0), + MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0), + MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + /* The EDL firmware is a flash-programmer exposing firehose protocol */ + MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), + MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), +}; + +static struct mhi_event_config mhi_quectel_em1xx_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 128), + MHI_EVENT_CONFIG_DATA(1, 128), + MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101) +}; + +static const struct mhi_controller_config modem_quectel_em1xx_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels), + 
.ch_cfg = mhi_quectel_em1xx_channels, + .num_events = ARRAY_SIZE(mhi_quectel_em1xx_events), + .event_cfg = mhi_quectel_em1xx_events, +}; + +static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = { + .name = "quectel-em1xx", + .edl = "qcom/prog_firehose_sdx24.mbn", + .config = &modem_quectel_em1xx_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = true, +}; + +static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = { + MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0), + MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), +}; + +static struct mhi_event_config mhi_foxconn_sdx55_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 128), + MHI_EVENT_CONFIG_DATA(1, 128), + MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101) +}; + +static const struct mhi_controller_config modem_foxconn_sdx55_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels), + .ch_cfg = mhi_foxconn_sdx55_channels, + .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events), + .event_cfg = mhi_foxconn_sdx55_events, +}; + +static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { + .name = "foxconn-sdx55", + .fw = "qcom/sdx55m/sbl1.mbn", + .edl = "qcom/sdx55m/edl.mbn", + .config = &modem_foxconn_sdx55_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_channel_config mhi_mv31_channels[] = { + MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0), + MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0), + /* MBIM Control Channel */ + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0), + /* MBIM Data Channel */ + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3), +}; + +static struct mhi_event_config mhi_mv31_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 256), + MHI_EVENT_CONFIG_DATA(1, 256), + MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101), +}; + +static const struct mhi_controller_config modem_mv31_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_mv31_channels), + .ch_cfg = mhi_mv31_channels, + .num_events = ARRAY_SIZE(mhi_mv31_events), + .event_cfg = mhi_mv31_events, +}; + +static const struct mhi_pci_dev_info mhi_mv31_info = { + .name = "cinterion-mv31", + .config = &modem_mv31_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, +}; + +static const struct mhi_channel_config mhi_sierra_em919x_channels[] = { + MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0), + MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 
512, 2), +}; + +static struct mhi_event_config modem_sierra_em919x_mhi_events[] = { + /* first ring is control+data and DIAG ring */ + MHI_EVENT_CONFIG_CTRL(0, 2048), + /* Hardware channels request dedicated hardware event rings */ + MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100), + MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101) +}; + +static const struct mhi_controller_config modem_sierra_em919x_config = { + .max_channels = 128, + .timeout_ms = 24000, + .num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels), + .ch_cfg = mhi_sierra_em919x_channels, + .num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events), + .event_cfg = modem_sierra_em919x_mhi_events, +}; + +static const struct mhi_pci_dev_info mhi_sierra_em919x_info = { + .name = "sierra-em919x", + .config = &modem_sierra_em919x_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, +}; + +static const struct pci_device_id mhi_pci_id_table[] = { + /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200), + .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, + { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */ + .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, + { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */ + .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, + /* T99W175 (sdx55), Both for eSIM and Non-eSIM */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + /* DW5930e (sdx55), With eSIM, It's also T99W175 */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + /* DW5930e (sdx55), Non-eSIM, It's also T99W175 */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + /* T99W175 (sdx55), Based on Qualcomm new baseline */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + /* MV31-W (Cinterion) */ + { PCI_DEVICE(0x1269, 0x00b3), + .driver_data = (kernel_ulong_t) &mhi_mv31_info }, + { } +}; +MODULE_DEVICE_TABLE(pci, mhi_pci_id_table); + +enum mhi_pci_device_status { + MHI_PCI_DEV_STARTED, + MHI_PCI_DEV_SUSPENDED, +}; + +struct mhi_pci_device { + struct mhi_controller mhi_cntrl; + struct pci_saved_state *pci_state; + struct work_struct recovery_work; + struct timer_list health_check_timer; + unsigned long status; +}; + +static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *addr, u32 *out) +{ + *out = readl(addr); + return 0; +} + +static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl, + void __iomem *addr, u32 val) +{ + writel(val, addr); +} + +static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl, + enum mhi_callback cb) +{ + struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); + + /* Nothing to do for now */ + switch (cb) { + case MHI_CB_FATAL_ERROR: + case MHI_CB_SYS_ERROR: + dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb); + pm_runtime_forbid(&pdev->dev); + break; + case MHI_CB_EE_MISSION_MODE: + pm_runtime_allow(&pdev->dev); + break; + default: + break; + } +} + +static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force) 
+{ + /* no-op */ +} + +static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override) +{ + /* no-op */ +} + +static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl) +{ + /* no-op */ +} + +static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl) +{ + struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); + u16 vendor = 0; + + if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor)) + return false; + + if (vendor == (u16) ~0 || vendor == 0) + return false; + + return true; +} + +static int mhi_pci_claim(struct mhi_controller *mhi_cntrl, + unsigned int bar_num, u64 dma_mask) +{ + struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); + int err; + + err = pci_assign_resource(pdev, bar_num); + if (err) + return err; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "failed to enable pci device: %d\n", err); + return err; + } + + err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev)); + if (err) { + dev_err(&pdev->dev, "failed to map pci region: %d\n", err); + return err; + } + mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num]; + mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num); + + err = dma_set_mask_and_coherent(&pdev->dev, dma_mask); + if (err) { + dev_err(&pdev->dev, "Cannot set proper DMA mask\n"); + return err; + } + + pci_set_master(pdev); + + return 0; +} + +static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *mhi_cntrl_config) +{ + struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); + int nr_vectors, i; + int *irq; + + /* + * Alloc one MSI vector for BHI + one vector per event ring, ideally... + * No explicit pci_free_irq_vectors required, done by pcim_release. + */ + mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events; + + nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI); + if (nr_vectors < 0) { + dev_err(&pdev->dev, "Error allocating MSI vectors %d\n", + nr_vectors); + return nr_vectors; + } + + if (nr_vectors < mhi_cntrl->nr_irqs) { + dev_warn(&pdev->dev, "using shared MSI\n"); + + /* Patch msi vectors, use only one (shared) */ + for (i = 0; i < mhi_cntrl_config->num_events; i++) + mhi_cntrl_config->event_cfg[i].irq = 0; + mhi_cntrl->nr_irqs = 1; + } + + irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL); + if (!irq) + return -ENOMEM; + + for (i = 0; i < mhi_cntrl->nr_irqs; i++) { + int vector = i >= nr_vectors ? (nr_vectors - 1) : i; + + irq[i] = pci_irq_vector(pdev, vector); + } + + mhi_cntrl->irq = irq; + + return 0; +} + +static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl) +{ + /* The runtime_get() MHI callback means: + * Do whatever is requested to leave M3. + */ + return pm_runtime_get(mhi_cntrl->cntrl_dev); +} + +static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl) +{ + /* The runtime_put() MHI callback means: + * Device can be moved in M3 state. 
+ */ + pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); + pm_runtime_put(mhi_cntrl->cntrl_dev); +} + +static void mhi_pci_recovery_work(struct work_struct *work) +{ + struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device, + recovery_work); + struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; + struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); + int err; + + dev_warn(&pdev->dev, "device recovery started\n"); + + del_timer(&mhi_pdev->health_check_timer); + pm_runtime_forbid(&pdev->dev); + + /* Clean up MHI state */ + if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { + mhi_power_down(mhi_cntrl, false); + mhi_unprepare_after_power_down(mhi_cntrl); + } + + pci_set_power_state(pdev, PCI_D0); + pci_load_saved_state(pdev, mhi_pdev->pci_state); + pci_restore_state(pdev); + + if (!mhi_pci_is_alive(mhi_cntrl)) + goto err_try_reset; + + err = mhi_prepare_for_power_up(mhi_cntrl); + if (err) + goto err_try_reset; + + err = mhi_sync_power_up(mhi_cntrl); + if (err) + goto err_unprepare; + + dev_dbg(&pdev->dev, "Recovery completed\n"); + + set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); + mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); + return; + +err_unprepare: + mhi_unprepare_after_power_down(mhi_cntrl); +err_try_reset: + if (pci_reset_function(pdev)) + dev_err(&pdev->dev, "Recovery failed\n"); +} + +static void health_check(struct timer_list *t) +{ + struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer); + struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; + + if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || + test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) + return; + + if (!mhi_pci_is_alive(mhi_cntrl)) { + dev_err(mhi_cntrl->cntrl_dev, "Device died\n"); + queue_work(system_long_wq, &mhi_pdev->recovery_work); + return; + } + + /* reschedule in two seconds */ + mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); +} + +static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data; + const struct mhi_controller_config *mhi_cntrl_config; + struct mhi_pci_device *mhi_pdev; + struct mhi_controller *mhi_cntrl; + int err; + + dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name); + + /* mhi_pdev.mhi_cntrl must be zero-initialized */ + mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL); + if (!mhi_pdev) + return -ENOMEM; + + INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work); + timer_setup(&mhi_pdev->health_check_timer, health_check, 0); + + mhi_cntrl_config = info->config; + mhi_cntrl = &mhi_pdev->mhi_cntrl; + + mhi_cntrl->cntrl_dev = &pdev->dev; + mhi_cntrl->iova_start = 0; + mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width); + mhi_cntrl->fw_image = info->fw; + mhi_cntrl->edl_image = info->edl; + + mhi_cntrl->read_reg = mhi_pci_read_reg; + mhi_cntrl->write_reg = mhi_pci_write_reg; + mhi_cntrl->status_cb = mhi_pci_status_cb; + mhi_cntrl->runtime_get = mhi_pci_runtime_get; + mhi_cntrl->runtime_put = mhi_pci_runtime_put; + mhi_cntrl->mru = info->mru_default; + + if (info->sideband_wake) { + mhi_cntrl->wake_get = mhi_pci_wake_get_nop; + mhi_cntrl->wake_put = mhi_pci_wake_put_nop; + mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop; + } + + err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width)); + if (err) + return err; + + err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config); + if (err) + return 
err; + + pci_set_drvdata(pdev, mhi_pdev); + + /* Have stored pci confspace at hand for restore in sudden PCI error. + * cache the state locally and discard the PCI core one. + */ + pci_save_state(pdev); + mhi_pdev->pci_state = pci_store_saved_state(pdev); + pci_load_saved_state(pdev, NULL); + + pci_enable_pcie_error_reporting(pdev); + + err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config); + if (err) + goto err_disable_reporting; + + /* MHI bus does not power up the controller by default */ + err = mhi_prepare_for_power_up(mhi_cntrl); + if (err) { + dev_err(&pdev->dev, "failed to prepare MHI controller\n"); + goto err_unregister; + } + + err = mhi_sync_power_up(mhi_cntrl); + if (err) { + dev_err(&pdev->dev, "failed to power up MHI controller\n"); + goto err_unprepare; + } + + set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); + + /* start health check */ + mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); + + /* Only allow runtime-suspend if PME capable (for wakeup) */ + if (pci_pme_capable(pdev, PCI_D3hot)) { + pm_runtime_set_autosuspend_delay(&pdev->dev, 2000); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + } + + return 0; + +err_unprepare: + mhi_unprepare_after_power_down(mhi_cntrl); +err_unregister: + mhi_unregister_controller(mhi_cntrl); +err_disable_reporting: + pci_disable_pcie_error_reporting(pdev); + + return err; +} + +static void mhi_pci_remove(struct pci_dev *pdev) +{ + struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); + struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; + + del_timer_sync(&mhi_pdev->health_check_timer); + cancel_work_sync(&mhi_pdev->recovery_work); + + if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { + mhi_power_down(mhi_cntrl, true); + mhi_unprepare_after_power_down(mhi_cntrl); + } + + /* balancing probe put_noidle */ + if (pci_pme_capable(pdev, PCI_D3hot)) + pm_runtime_get_noresume(&pdev->dev); + + mhi_unregister_controller(mhi_cntrl); + pci_disable_pcie_error_reporting(pdev); +} + +static void mhi_pci_shutdown(struct pci_dev *pdev) +{ + mhi_pci_remove(pdev); + pci_set_power_state(pdev, PCI_D3hot); +} + +static void mhi_pci_reset_prepare(struct pci_dev *pdev) +{ + struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); + struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; + + dev_info(&pdev->dev, "reset\n"); + + del_timer(&mhi_pdev->health_check_timer); + + /* Clean up MHI state */ + if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { + mhi_power_down(mhi_cntrl, false); + mhi_unprepare_after_power_down(mhi_cntrl); + } + + /* cause internal device reset */ + mhi_soc_reset(mhi_cntrl); + + /* Be sure device reset has been executed */ + msleep(MHI_POST_RESET_DELAY_MS); +} + +static void mhi_pci_reset_done(struct pci_dev *pdev) +{ + struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); + struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; + int err; + + /* Restore initial known working PCI state */ + pci_load_saved_state(pdev, mhi_pdev->pci_state); + pci_restore_state(pdev); + + /* Is device status available ? 
*/ + if (!mhi_pci_is_alive(mhi_cntrl)) { + dev_err(&pdev->dev, "reset failed\n"); + return; + } + + err = mhi_prepare_for_power_up(mhi_cntrl); + if (err) { + dev_err(&pdev->dev, "failed to prepare MHI controller\n"); + return; + } + + err = mhi_sync_power_up(mhi_cntrl); + if (err) { + dev_err(&pdev->dev, "failed to power up MHI controller\n"); + mhi_unprepare_after_power_down(mhi_cntrl); + return; + } + + set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); + mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); +} + +static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); + struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; + + dev_err(&pdev->dev, "PCI error detected, state = %u\n", state); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + /* Clean up MHI state */ + if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { + mhi_power_down(mhi_cntrl, false); + mhi_unprepare_after_power_down(mhi_cntrl); + } else { + /* Nothing to do */ + return PCI_ERS_RESULT_RECOVERED; + } + + pci_disable_device(pdev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev) +{ + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +static void mhi_pci_io_resume(struct pci_dev *pdev) +{ + struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); + + dev_err(&pdev->dev, "PCI slot reset done\n"); + + queue_work(system_long_wq, &mhi_pdev->recovery_work); +} + +static const struct pci_error_handlers mhi_pci_err_handler = { + .error_detected = mhi_pci_error_detected, + .slot_reset = mhi_pci_slot_reset, + .resume = mhi_pci_io_resume, + .reset_prepare = mhi_pci_reset_prepare, + .reset_done = mhi_pci_reset_done, +}; + +static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); + struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; + int err; + + if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) + return 0; + + del_timer(&mhi_pdev->health_check_timer); + cancel_work_sync(&mhi_pdev->recovery_work); + + if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || + mhi_cntrl->ee != MHI_EE_AMSS) + goto pci_suspend; /* Nothing to do at MHI level */ + + /* Transition to M3 state */ + err = mhi_pm_suspend(mhi_cntrl); + if (err) { + dev_err(&pdev->dev, "failed to suspend device: %d\n", err); + clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status); + return -EBUSY; + } + +pci_suspend: + pci_disable_device(pdev); + pci_wake_from_d3(pdev, true); + + return 0; +} + +static int __maybe_unused mhi_pci_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); + struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; + int err; + + if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) + return 0; + + err = pci_enable_device(pdev); + if (err) + goto err_recovery; + + pci_set_master(pdev); + pci_wake_from_d3(pdev, false); + + if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || + mhi_cntrl->ee != MHI_EE_AMSS) + return 0; /* Nothing to do at MHI level */ + + /* Exit M3, transition to M0 state */ + err = mhi_pm_resume(mhi_cntrl); + if (err) { + dev_err(&pdev->dev, 
"failed to resume device: %d\n", err); + goto err_recovery; + } + + /* Resume health check */ + mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); + + /* It can be a remote wakeup (no mhi runtime_get), update access time */ + pm_runtime_mark_last_busy(dev); + + return 0; + +err_recovery: + /* Do not fail to not mess up our PCI device state, the device likely + * lost power (d3cold) and we simply need to reset it from the recovery + * procedure, trigger the recovery asynchronously to prevent system + * suspend exit delaying. + */ + queue_work(system_long_wq, &mhi_pdev->recovery_work); + pm_runtime_mark_last_busy(dev); + + return 0; +} + +static int __maybe_unused mhi_pci_suspend(struct device *dev) +{ + pm_runtime_disable(dev); + return mhi_pci_runtime_suspend(dev); +} + +static int __maybe_unused mhi_pci_resume(struct device *dev) +{ + int ret; + + /* Depending the platform, device may have lost power (d3cold), we need + * to resume it now to check its state and recover when necessary. + */ + ret = mhi_pci_runtime_resume(dev); + pm_runtime_enable(dev); + + return ret; +} + +static int __maybe_unused mhi_pci_freeze(struct device *dev) +{ + struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); + struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; + + /* We want to stop all operations, hibernation does not guarantee that + * device will be in the same state as before freezing, especially if + * the intermediate restore kernel reinitializes MHI device with new + * context. + */ + if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { + mhi_power_down(mhi_cntrl, true); + mhi_unprepare_after_power_down(mhi_cntrl); + } + + return 0; +} + +static int __maybe_unused mhi_pci_restore(struct device *dev) +{ + struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); + + /* Reinitialize the device */ + queue_work(system_long_wq, &mhi_pdev->recovery_work); + + return 0; +} + +static const struct dev_pm_ops mhi_pci_pm_ops = { + SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL) +#ifdef CONFIG_PM_SLEEP + .suspend = mhi_pci_suspend, + .resume = mhi_pci_resume, + .freeze = mhi_pci_freeze, + .thaw = mhi_pci_restore, + .restore = mhi_pci_restore, +#endif +}; + +static struct pci_driver mhi_pci_driver = { + .name = "mhi-pci-generic", + .id_table = mhi_pci_id_table, + .probe = mhi_pci_probe, + .remove = mhi_pci_remove, + .shutdown = mhi_pci_shutdown, + .err_handler = &mhi_pci_err_handler, + .driver.pm = &mhi_pci_pm_ops +}; +module_pci_driver(mhi_pci_driver); + +MODULE_AUTHOR("Loic Poulain "); +MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c new file mode 100644 index 000000000000..c35c5ddc7220 --- /dev/null +++ b/drivers/bus/mhi/host/pm.c @@ -0,0 +1,1260 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +/* + * Not all MHI state transitions are synchronous. Transitions like Linkdown, + * SYS_ERR, and shutdown can happen anytime asynchronously. This function will + * transition to a new state only if we're allowed to. + * + * Priority increases as we go down. For instance, from any state in L0, the + * transition can be made to states in L1, L2 and L3. A notable exception to + * this rule is state DISABLE. 
From DISABLE state we can only transition to + * POR state. Also, while in L2 state, user cannot jump back to previous + * L1 or L0 states. + * + * Valid transitions: + * L0: DISABLE <--> POR + * POR <--> POR + * POR -> M0 -> M2 --> M0 + * POR -> FW_DL_ERR + * FW_DL_ERR <--> FW_DL_ERR + * M0 <--> M0 + * M0 -> FW_DL_ERR + * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 + * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR + * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT + * SHUTDOWN_PROCESS -> DISABLE + * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT + * LD_ERR_FATAL_DETECT -> DISABLE + */ +static const struct mhi_pm_transitions dev_state_transitions[] = { + /* L0 States */ + { + MHI_PM_DISABLE, + MHI_PM_POR + }, + { + MHI_PM_POR, + MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 | + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M0, + MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER | + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M2, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_ENTER, + MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3, + MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_EXIT, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_FW_DL_ERR, + MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L1 States */ + { + MHI_PM_SYS_ERR_DETECT, + MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_SYS_ERR_PROCESS, + MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L2 States */ + { + MHI_PM_SHUTDOWN_PROCESS, + MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L3 States */ + { + MHI_PM_LD_ERR_FATAL_DETECT, + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE + }, +}; + +enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl, + enum mhi_pm_state state) +{ + unsigned long cur_state = mhi_cntrl->pm_state; + int index = find_last_bit(&cur_state, 32); + + if (unlikely(index >= ARRAY_SIZE(dev_state_transitions))) + return cur_state; + + if (unlikely(dev_state_transitions[index].from_state != cur_state)) + return cur_state; + + if (unlikely(!(dev_state_transitions[index].to_states & state))) + return cur_state; + + mhi_cntrl->pm_state = state; + return mhi_cntrl->pm_state; +} + +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state) +{ + if (state == MHI_STATE_RESET) { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1); + } else { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_MHISTATE_MASK, + MHICTRL_MHISTATE_SHIFT, state); + } +} + +/* NOP for backward compatibility, host allowed to ring DB in M2 state */ +static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl) +{ +} + +static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl) +{ + mhi_cntrl->wake_get(mhi_cntrl, false); + mhi_cntrl->wake_put(mhi_cntrl, true); +} + +/* Handle device ready state transition */ +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) +{ + struct mhi_event *mhi_event; + enum mhi_pm_state cur_state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 interval_us = 25000; /* poll register 
field every 25 milliseconds */ + int ret, i; + + /* Check if device entered error state */ + if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { + dev_err(dev, "Device link is not accessible\n"); + return -EIO; + } + + /* Wait for RESET to be cleared and READY bit to be set by the device */ + ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, + interval_us); + if (ret) { + dev_err(dev, "Device failed to clear MHI Reset\n"); + return ret; + } + + ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, + MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT, 1, + interval_us); + if (ret) { + dev_err(dev, "Device failed to enter MHI Ready\n"); + return ret; + } + + dev_dbg(dev, "Device in READY State\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); + mhi_cntrl->dev_state = MHI_STATE_READY; + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state != MHI_PM_POR) { + dev_err(dev, "Error moving to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_POR), + to_mhi_pm_state_str(cur_state)); + return -EIO; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + dev_err(dev, "Device registers not accessible\n"); + goto error_mmio; + } + + /* Configure MMIO registers */ + ret = mhi_init_mmio(mhi_cntrl); + if (ret) { + dev_err(dev, "Error configuring MMIO registers\n"); + goto error_mmio; + } + + /* Add elements to all SW event rings */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip if this is an offload or HW event */ + if (mhi_event->offload_ev || mhi_event->hw_ring) + continue; + + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); + /* Update all cores */ + smp_wmb(); + + /* Ring the event ring db */ + spin_lock_irq(&mhi_event->lock); + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + /* Set MHI to M0 state */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; + +error_mmio: + read_unlock_bh(&mhi_cntrl->pm_lock); + + return -EIO; +} + +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state cur_state; + struct mhi_chan *mhi_chan; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int i; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M0; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_M0)) { + dev_err(dev, "Unable to transition to M0 state\n"); + return -EIO; + } + mhi_cntrl->M0++; + + /* Wake up the device */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + + /* Ring all event rings and CMD ring only if we're in mission mode */ + if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) { + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + struct mhi_cmd *mhi_cmd = + &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + spin_lock_irq(&mhi_event->lock); + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + /* Only ring primary cmd ring if ring is not empty */ + spin_lock_irq(&mhi_cmd->lock); + if (mhi_cmd->ring.rp != mhi_cmd->ring.wp) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + spin_unlock_irq(&mhi_cmd->lock); + } + + /* Ring channel DB 
registers */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + if (mhi_chan->db_cfg.reset_req) { + write_lock_irq(&mhi_chan->lock); + mhi_chan->db_cfg.db_mode = true; + write_unlock_irq(&mhi_chan->lock); + } + + read_lock_irq(&mhi_chan->lock); + + /* Only ring DB if ring is not empty */ + if (tre_ring->base && tre_ring->wp != tre_ring->rp) + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irq(&mhi_chan->lock); + } + + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + + return 0; +} + +/* + * After receiving the MHI state change event from the device indicating the + * transition to M1 state, the host can transition the device to M2 state + * for keeping it in low power state. + */ +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + write_lock_irq(&mhi_cntrl->pm_lock); + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2); + if (state == MHI_PM_M2) { + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2); + mhi_cntrl->dev_state = MHI_STATE_M2; + + write_unlock_irq(&mhi_cntrl->pm_lock); + + mhi_cntrl->M2++; + wake_up_all(&mhi_cntrl->state_event); + + /* If there are any pending resources, exit M2 immediately */ + if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) || + atomic_read(&mhi_cntrl->dev_wake))) { + dev_dbg(dev, + "Exiting M2, pending_pkts: %d dev_wake: %d\n", + atomic_read(&mhi_cntrl->pending_pkts), + atomic_read(&mhi_cntrl->dev_wake)); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + mhi_cntrl->wake_put(mhi_cntrl, true); + read_unlock_bh(&mhi_cntrl->pm_lock); + } else { + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE); + } + } else { + write_unlock_irq(&mhi_cntrl->pm_lock); + } +} + +/* MHI M3 completion handler */ +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M3; + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (state != MHI_PM_M3) { + dev_err(dev, "Unable to transition to M3 state\n"); + return -EIO; + } + + mhi_cntrl->M3++; + wake_up_all(&mhi_cntrl->state_event); + + return 0; +} + +/* Handle device Mission Mode transition */ +static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) +{ + struct mhi_event *mhi_event; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee; + int i, ret; + + dev_dbg(dev, "Processing Mission Mode transition\n"); + + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + ee = mhi_get_exec_env(mhi_cntrl); + + if (!MHI_IN_MISSION_MODE(ee)) { + mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + return -EIO; + } + mhi_cntrl->ee = ee; + write_unlock_irq(&mhi_cntrl->pm_lock); + + wake_up_all(&mhi_cntrl->state_event); + + device_for_each_child(&mhi_cntrl->mhi_dev->dev, ¤t_ee, + mhi_destroy_device); + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE); + + /* Force MHI to be in M0 state before continuing */ + ret = __mhi_device_get_sync(mhi_cntrl); + if (ret) + return ret; + + read_lock_bh(&mhi_cntrl->pm_lock); + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + ret = 
-EIO; + goto error_mission_mode; + } + + /* Add elements to all HW event rings */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev || !mhi_event->hw_ring) + continue; + + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); + /* Update to all cores */ + smp_wmb(); + + spin_lock_irq(&mhi_event->lock); + if (MHI_DB_ACCESS_VALID(mhi_cntrl)) + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + + /* + * The MHI devices are only created when the client device switches its + * Execution Environment (EE) to either SBL or AMSS states + */ + mhi_create_devices(mhi_cntrl); + + read_lock_bh(&mhi_cntrl->pm_lock); + +error_mission_mode: + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} + +/* Handle shutdown transitions */ +static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state cur_state; + struct mhi_event *mhi_event; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event_ctxt *er_ctxt; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret, i; + + dev_dbg(dev, "Processing disable transition with PM state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + mutex_lock(&mhi_cntrl->pm_mutex); + + /* Trigger MHI RESET so that the device will not access host memory */ + if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { + dev_dbg(dev, "Triggering MHI Reset in device\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + + /* Wait for the reset bit to be cleared by the device */ + ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, + 25000); + if (ret) + dev_err(dev, "Device failed to clear MHI Reset\n"); + + /* + * Device will clear BHI_INTVEC as a part of RESET processing, + * hence re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + } + + dev_dbg(dev, + "Waiting for all pending event ring processing to complete\n"); + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); + tasklet_kill(&mhi_event->task); + } + + /* Release lock and wait for all pending threads to complete */ + mutex_unlock(&mhi_cntrl->pm_mutex); + dev_dbg(dev, "Waiting for all pending threads to complete\n"); + wake_up_all(&mhi_cntrl->state_event); + + dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); + + mutex_lock(&mhi_cntrl->pm_mutex); + + WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); + WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); + + /* Reset the ev rings and cmd rings */ + dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n"); + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->rp = ring->base; + ring->wp = ring->base; + cmd_ctxt->rp = cmd_ctxt->rbase; + cmd_ctxt->wp = cmd_ctxt->rbase; + } + + mhi_event = mhi_cntrl->mhi_event; + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip offload 
events */ + if (mhi_event->offload_ev) + continue; + + ring->rp = ring->base; + ring->wp = ring->base; + er_ctxt->rp = er_ctxt->rbase; + er_ctxt->wp = er_ctxt->rbase; + } + + /* Move to disable state */ + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_DISABLE)) + dev_err(dev, "Error moving from PM state: %s to: %s\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(MHI_PM_DISABLE)); + + dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + mutex_unlock(&mhi_cntrl->pm_mutex); +} + +/* Handle system error transitions */ +static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state cur_state, prev_state; + enum dev_st_transition next_state; + struct mhi_event *mhi_event; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event_ctxt *er_ctxt; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret, i; + + dev_dbg(dev, "Transitioning from PM state: %s to: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS)); + + /* We must notify MHI control driver so it can clean up first */ + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR); + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + prev_state = mhi_cntrl->pm_state; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state != MHI_PM_SYS_ERR_PROCESS) { + dev_err(dev, "Failed to transition from PM state: %s to: %s\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS)); + goto exit_sys_error_transition; + } + + mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; + mhi_cntrl->dev_state = MHI_STATE_RESET; + + /* Wake up threads waiting for state transition */ + wake_up_all(&mhi_cntrl->state_event); + + /* Trigger MHI RESET so that the device will not access host memory */ + if (MHI_REG_ACCESS_VALID(prev_state)) { + u32 in_reset = -1; + unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); + + dev_dbg(dev, "Triggering MHI Reset in device\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + + /* Wait for the reset bit to be cleared by the device */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_read_reg_field(mhi_cntrl, + mhi_cntrl->regs, + MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, + &in_reset) || + !in_reset, timeout); + if (!ret || in_reset) { + dev_err(dev, "Device failed to exit MHI Reset state\n"); + goto exit_sys_error_transition; + } + + /* + * Device will clear BHI_INTVEC as a part of RESET processing, + * hence re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + } + + dev_dbg(dev, + "Waiting for all pending event ring processing to complete\n"); + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + tasklet_kill(&mhi_event->task); + } + + /* Release lock and wait for all pending threads to complete */ + mutex_unlock(&mhi_cntrl->pm_mutex); + dev_dbg(dev, "Waiting for all pending threads to complete\n"); + wake_up_all(&mhi_cntrl->state_event); + + dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); + + mutex_lock(&mhi_cntrl->pm_mutex); + + 
WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); + WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); + + /* Reset the ev rings and cmd rings */ + dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n"); + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->rp = ring->base; + ring->wp = ring->base; + cmd_ctxt->rp = cmd_ctxt->rbase; + cmd_ctxt->wp = cmd_ctxt->rbase; + } + + mhi_event = mhi_cntrl->mhi_event; + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip offload events */ + if (mhi_event->offload_ev) + continue; + + ring->rp = ring->base; + ring->wp = ring->base; + er_ctxt->rp = er_ctxt->rbase; + er_ctxt->wp = er_ctxt->rbase; + } + + /* Transition to next state */ + if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) { + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (cur_state != MHI_PM_POR) { + dev_err(dev, "Error moving to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_POR), + to_mhi_pm_state_str(cur_state)); + goto exit_sys_error_transition; + } + next_state = DEV_ST_TRANSITION_PBL; + } else { + next_state = DEV_ST_TRANSITION_READY; + } + + mhi_queue_state_transition(mhi_cntrl, next_state); + +exit_sys_error_transition: + dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + mutex_unlock(&mhi_cntrl->pm_mutex); +} + +/* Queue a new work item and schedule work */ +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum dev_st_transition state) +{ + struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC); + unsigned long flags; + + if (!item) + return -ENOMEM; + + item->state = state; + spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); + list_add_tail(&item->node, &mhi_cntrl->transition_list); + spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); + + queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker); + + return 0; +} + +/* SYS_ERR worker */ +void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + /* skip if controller supports RDDM */ + if (mhi_cntrl->rddm_image) { + dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n"); + return; + } + + mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR); +} + +/* Device State Transition worker */ +void mhi_pm_st_worker(struct work_struct *work) +{ + struct state_transition *itr, *tmp; + LIST_HEAD(head); + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + st_worker); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + spin_lock_irq(&mhi_cntrl->transition_lock); + list_splice_tail_init(&mhi_cntrl->transition_list, &head); + spin_unlock_irq(&mhi_cntrl->transition_lock); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + dev_dbg(dev, "Handling state transition: %s\n", + TO_DEV_STATE_TRANS_STR(itr->state)); + + switch (itr->state) { + case DEV_ST_TRANSITION_PBL: + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + mhi_fw_load_handler(mhi_cntrl); + break; + case DEV_ST_TRANSITION_SBL: + write_lock_irq(&mhi_cntrl->pm_lock); 
+ mhi_cntrl->ee = MHI_EE_SBL; + write_unlock_irq(&mhi_cntrl->pm_lock); + /* + * The MHI devices are only created when the client + * device switches its Execution Environment (EE) to + * either SBL or AMSS states + */ + mhi_create_devices(mhi_cntrl); + if (mhi_cntrl->fbc_download) + mhi_download_amss_image(mhi_cntrl); + break; + case DEV_ST_TRANSITION_MISSION_MODE: + mhi_pm_mission_mode_transition(mhi_cntrl); + break; + case DEV_ST_TRANSITION_FP: + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_FP; + write_unlock_irq(&mhi_cntrl->pm_lock); + mhi_create_devices(mhi_cntrl); + break; + case DEV_ST_TRANSITION_READY: + mhi_ready_state_transition(mhi_cntrl); + break; + case DEV_ST_TRANSITION_SYS_ERR: + mhi_pm_sys_error_transition(mhi_cntrl); + break; + case DEV_ST_TRANSITION_DISABLE: + mhi_pm_disable_transition(mhi_cntrl); + break; + default: + break; + } + kfree(itr); + } +} + +int mhi_pm_suspend(struct mhi_controller *mhi_cntrl) +{ + struct mhi_chan *itr, *tmp; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_pm_state new_state; + int ret; + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return -EINVAL; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + /* Return busy if there are any pending resources */ + if (atomic_read(&mhi_cntrl->dev_wake) || + atomic_read(&mhi_cntrl->pending_pkts)) + return -EBUSY; + + /* Take MHI out of M2 state */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + mhi_cntrl->dev_state == MHI_STATE_M1 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(dev, + "Could not enter M0/M1 state"); + return -EIO; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + + if (atomic_read(&mhi_cntrl->dev_wake) || + atomic_read(&mhi_cntrl->pending_pkts)) { + write_unlock_irq(&mhi_cntrl->pm_lock); + return -EBUSY; + } + + dev_dbg(dev, "Allowing M3 transition\n"); + new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER); + if (new_state != MHI_PM_M3_ENTER) { + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_err(dev, + "Error setting to PM state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_M3_ENTER), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* Set MHI to M3 and wait for completion */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_dbg(dev, "Waiting for M3 completion\n"); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M3 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(dev, + "Did not enter M3 state, MHI state: %s, PM state: %s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* Notify clients about entering LPM */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); + mutex_unlock(&itr->mutex); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_pm_suspend); + +static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force) +{ + struct mhi_chan *itr, *tmp; + 
struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_pm_state cur_state; + int ret; + + dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return 0; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) { + dev_warn(dev, "Resuming from non M3 state (%s)\n", + TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl))); + if (!force) + return -EINVAL; + } + + /* Notify clients about exiting LPM */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT); + mutex_unlock(&itr->mutex); + } + + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT); + if (cur_state != MHI_PM_M3_EXIT) { + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_info(dev, + "Error setting to PM state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_M3_EXIT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* Set MHI to M0 and wait for completion */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + mhi_cntrl->dev_state == MHI_STATE_M2 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(dev, + "Did not enter M0 state, MHI state: %s, PM state: %s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + return 0; +} + +int mhi_pm_resume(struct mhi_controller *mhi_cntrl) +{ + return __mhi_pm_resume(mhi_cntrl, false); +} +EXPORT_SYMBOL_GPL(mhi_pm_resume); + +int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl) +{ + return __mhi_pm_resume(mhi_cntrl, true); +} +EXPORT_SYMBOL_GPL(mhi_pm_resume_force); + +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) +{ + int ret; + + /* Wake up the device */ + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + mhi_cntrl->wake_get(mhi_cntrl, true); + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) + mhi_trigger_resume(mhi_cntrl); + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->pm_state == MHI_PM_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + return 0; +} + +/* Assert device wake db */ +static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force) +{ + unsigned long flags; + + /* + * If force flag is set, then increment the wake count value and + * ring wake db + */ + if (unlikely(force)) { + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + atomic_inc(&mhi_cntrl->dev_wake); + if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } else { + /* + * If resources are already requested, then just increment + * the wake count value and return + */ + if 
(likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) && + MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } +} + +/* De-assert device wake db */ +static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, + bool override) +{ + unsigned long flags; + + /* + * Only continue if there is a single resource, else just decrement + * and return + */ + if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) && + MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override && + mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0); + mhi_cntrl->wake_set = false; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); +} + +int mhi_async_power_up(struct mhi_controller *mhi_cntrl) +{ + enum mhi_state state; + enum mhi_ee_type current_ee; + enum dev_st_transition next_state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 interval_us = 25000; /* poll register field every 25 milliseconds */ + int ret; + + dev_info(dev, "Requested to power ON\n"); + + /* Supply default wake routines if not provided by controller driver */ + if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put || + !mhi_cntrl->wake_toggle) { + mhi_cntrl->wake_get = mhi_assert_dev_wake; + mhi_cntrl->wake_put = mhi_deassert_dev_wake; + mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ? + mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake; + } + + mutex_lock(&mhi_cntrl->pm_mutex); + mhi_cntrl->pm_state = MHI_PM_DISABLE; + + /* Setup BHI INTVEC */ + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + mhi_cntrl->pm_state = MHI_PM_POR; + mhi_cntrl->ee = MHI_EE_MAX; + current_ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* Confirm that the device is in valid exec env */ + if (!MHI_POWER_UP_CAPABLE(current_ee)) { + dev_err(dev, "%s is not a valid EE for power on\n", + TO_MHI_EXEC_STR(current_ee)); + ret = -EIO; + goto error_exit; + } + + state = mhi_get_mhi_state(mhi_cntrl); + dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n", + TO_MHI_EXEC_STR(current_ee), TO_MHI_STATE_STR(state)); + + if (state == MHI_STATE_SYS_ERR) { + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, + interval_us); + if (ret) { + dev_info(dev, "Failed to reset MHI due to syserr state\n"); + goto error_exit; + } + + /* + * device cleares INTVEC as part of RESET processing, + * re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + } + + ret = mhi_init_irq_setup(mhi_cntrl); + if (ret) + goto error_exit; + + /* Transition to next state */ + next_state = MHI_IN_PBL(current_ee) ? 
+ DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY; + + mhi_queue_state_transition(mhi_cntrl, next_state); + + mutex_unlock(&mhi_cntrl->pm_mutex); + + dev_info(dev, "Power on setup success\n"); + + return 0; + +error_exit: + mhi_cntrl->pm_state = MHI_PM_DISABLE; + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_async_power_up); + +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) +{ + enum mhi_pm_state cur_state, transition_state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_cntrl->pm_state; + if (cur_state == MHI_PM_DISABLE) { + write_unlock_irq(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_cntrl->pm_mutex); + return; /* Already powered down */ + } + + /* If it's not a graceful shutdown, force MHI to linkdown state */ + transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS : + MHI_PM_LD_ERR_FATAL_DETECT; + + cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state); + if (cur_state != transition_state) { + dev_err(dev, "Failed to move to state: %s from: %s\n", + to_mhi_pm_state_str(transition_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + /* Force link down or error fatal detected state */ + mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; + } + + /* mark device inactive to avoid any further host processing */ + mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; + mhi_cntrl->dev_state = MHI_STATE_RESET; + + wake_up_all(&mhi_cntrl->state_event); + + write_unlock_irq(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_cntrl->pm_mutex); + + mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE); + + /* Wait for shutdown to complete */ + flush_work(&mhi_cntrl->st_worker); + + free_irq(mhi_cntrl->irq[0], mhi_cntrl); +} +EXPORT_SYMBOL_GPL(mhi_power_down); + +int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret = mhi_async_power_up(mhi_cntrl); + + if (ret) + return ret; + + wait_event_timeout(mhi_cntrl->state_event, + MHI_IN_MISSION_MODE(mhi_cntrl->ee) || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT; + if (ret) + mhi_power_down(mhi_cntrl, false); + + return ret; +} +EXPORT_SYMBOL(mhi_sync_power_up); + +int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + /* Check if device is already in RDDM */ + if (mhi_cntrl->ee == MHI_EE_RDDM) + return 0; + + dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + + /* Wait for RDDM event */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_RDDM, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + ret = ret ? 
0 : -EIO; + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_force_rddm_mode); + +void mhi_device_get(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + mhi_dev->dev_wake++; + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) + mhi_trigger_resume(mhi_cntrl); + + mhi_cntrl->wake_get(mhi_cntrl, true); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL_GPL(mhi_device_get); + +int mhi_device_get_sync(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int ret; + + ret = __mhi_device_get_sync(mhi_cntrl); + if (!ret) + mhi_dev->dev_wake++; + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_device_get_sync); + +void mhi_device_put(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + mhi_dev->dev_wake--; + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) + mhi_trigger_resume(mhi_cntrl); + + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL_GPL(mhi_device_put); diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c deleted file mode 100644 index 9527b7d63840..000000000000 --- a/drivers/bus/mhi/pci_generic.c +++ /dev/null @@ -1,1105 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * MHI PCI driver - MHI over PCI controller driver - * - * This module is a generic driver for registering MHI-over-PCI devices, - * such as PCIe QCOM modems. - * - * Copyright (C) 2020 Linaro Ltd - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define MHI_PCI_DEFAULT_BAR_NUM 0 - -#define MHI_POST_RESET_DELAY_MS 2000 - -#define HEALTH_CHECK_PERIOD (HZ * 2) - -/** - * struct mhi_pci_dev_info - MHI PCI device specific information - * @config: MHI controller configuration - * @name: name of the PCI module - * @fw: firmware path (if any) - * @edl: emergency download mode firmware path (if any) - * @bar_num: PCI base address register to use for MHI MMIO register space - * @dma_data_width: DMA transfer word size (32 or 64 bits) - * @mru_default: default MRU size for MBIM network packets - * @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead - * of inband wake support (such as sdx24) - */ -struct mhi_pci_dev_info { - const struct mhi_controller_config *config; - const char *name; - const char *fw; - const char *edl; - unsigned int bar_num; - unsigned int dma_data_width; - unsigned int mru_default; - bool sideband_wake; -}; - -#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \ - { \ - .num = ch_num, \ - .name = ch_name, \ - .num_elements = el_count, \ - .event_ring = ev_ring, \ - .dir = DMA_TO_DEVICE, \ - .ee_mask = BIT(MHI_EE_AMSS), \ - .pollcfg = 0, \ - .doorbell = MHI_DB_BRST_DISABLE, \ - .lpm_notify = false, \ - .offload_channel = false, \ - .doorbell_mode_switch = false, \ - } \ - -#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \ - { \ - .num = ch_num, \ - .name = ch_name, \ - .num_elements = el_count, \ - .event_ring = ev_ring, \ - .dir = DMA_FROM_DEVICE, \ - .ee_mask = BIT(MHI_EE_AMSS), \ - .pollcfg = 0, \ - .doorbell = MHI_DB_BRST_DISABLE, \ - .lpm_notify = false, \ - .offload_channel = false, \ - .doorbell_mode_switch = false, \ - } - -#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \ - { \ - .num = ch_num, \ - .name = ch_name, \ - .num_elements = el_count, \ - .event_ring = ev_ring, \ - .dir = DMA_FROM_DEVICE, \ - .ee_mask = 
BIT(MHI_EE_AMSS), \ - .pollcfg = 0, \ - .doorbell = MHI_DB_BRST_DISABLE, \ - .lpm_notify = false, \ - .offload_channel = false, \ - .doorbell_mode_switch = false, \ - .auto_queue = true, \ - } - -#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \ - { \ - .num_elements = el_count, \ - .irq_moderation_ms = 0, \ - .irq = (ev_ring) + 1, \ - .priority = 1, \ - .mode = MHI_DB_BRST_DISABLE, \ - .data_type = MHI_ER_CTRL, \ - .hardware_event = false, \ - .client_managed = false, \ - .offload_channel = false, \ - } - -#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \ - { \ - .num = ch_num, \ - .name = ch_name, \ - .num_elements = el_count, \ - .event_ring = ev_ring, \ - .dir = DMA_TO_DEVICE, \ - .ee_mask = BIT(MHI_EE_AMSS), \ - .pollcfg = 0, \ - .doorbell = MHI_DB_BRST_ENABLE, \ - .lpm_notify = false, \ - .offload_channel = false, \ - .doorbell_mode_switch = true, \ - } \ - -#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \ - { \ - .num = ch_num, \ - .name = ch_name, \ - .num_elements = el_count, \ - .event_ring = ev_ring, \ - .dir = DMA_FROM_DEVICE, \ - .ee_mask = BIT(MHI_EE_AMSS), \ - .pollcfg = 0, \ - .doorbell = MHI_DB_BRST_ENABLE, \ - .lpm_notify = false, \ - .offload_channel = false, \ - .doorbell_mode_switch = true, \ - } - -#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \ - { \ - .num = ch_num, \ - .name = ch_name, \ - .num_elements = el_count, \ - .event_ring = ev_ring, \ - .dir = DMA_TO_DEVICE, \ - .ee_mask = BIT(MHI_EE_SBL), \ - .pollcfg = 0, \ - .doorbell = MHI_DB_BRST_DISABLE, \ - .lpm_notify = false, \ - .offload_channel = false, \ - .doorbell_mode_switch = false, \ - } \ - -#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \ - { \ - .num = ch_num, \ - .name = ch_name, \ - .num_elements = el_count, \ - .event_ring = ev_ring, \ - .dir = DMA_FROM_DEVICE, \ - .ee_mask = BIT(MHI_EE_SBL), \ - .pollcfg = 0, \ - .doorbell = MHI_DB_BRST_DISABLE, \ - .lpm_notify = false, \ - .offload_channel = false, \ - .doorbell_mode_switch = false, \ - } - -#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \ - { \ - .num = ch_num, \ - .name = ch_name, \ - .num_elements = el_count, \ - .event_ring = ev_ring, \ - .dir = DMA_TO_DEVICE, \ - .ee_mask = BIT(MHI_EE_FP), \ - .pollcfg = 0, \ - .doorbell = MHI_DB_BRST_DISABLE, \ - .lpm_notify = false, \ - .offload_channel = false, \ - .doorbell_mode_switch = false, \ - } \ - -#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \ - { \ - .num = ch_num, \ - .name = ch_name, \ - .num_elements = el_count, \ - .event_ring = ev_ring, \ - .dir = DMA_FROM_DEVICE, \ - .ee_mask = BIT(MHI_EE_FP), \ - .pollcfg = 0, \ - .doorbell = MHI_DB_BRST_DISABLE, \ - .lpm_notify = false, \ - .offload_channel = false, \ - .doorbell_mode_switch = false, \ - } - -#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \ - { \ - .num_elements = el_count, \ - .irq_moderation_ms = 5, \ - .irq = (ev_ring) + 1, \ - .priority = 1, \ - .mode = MHI_DB_BRST_DISABLE, \ - .data_type = MHI_ER_DATA, \ - .hardware_event = false, \ - .client_managed = false, \ - .offload_channel = false, \ - } - -#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \ - { \ - .num_elements = el_count, \ - .irq_moderation_ms = 1, \ - .irq = (ev_ring) + 1, \ - .priority = 1, \ - .mode = MHI_DB_BRST_DISABLE, \ - .data_type = MHI_ER_DATA, \ - .hardware_event = true, \ - .client_managed = false, \ - .offload_channel = false, \ - .channel = ch_num, \ - } - -static const struct mhi_channel_config 
modem_qcom_v1_mhi_channels[] = { - MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1), - MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1), - MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0), - MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0), - MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0), - MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0), - MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0), - MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0), - MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), - MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), - MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2), - MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3), -}; - -static struct mhi_event_config modem_qcom_v1_mhi_events[] = { - /* first ring is control+data ring */ - MHI_EVENT_CONFIG_CTRL(0, 64), - /* DIAG dedicated event ring */ - MHI_EVENT_CONFIG_DATA(1, 128), - /* Hardware channels request dedicated hardware event rings */ - MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), - MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101) -}; - -static const struct mhi_controller_config modem_qcom_v1_mhiv_config = { - .max_channels = 128, - .timeout_ms = 8000, - .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels), - .ch_cfg = modem_qcom_v1_mhi_channels, - .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events), - .event_cfg = modem_qcom_v1_mhi_events, -}; - -static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = { - .name = "qcom-sdx65m", - .fw = "qcom/sdx65m/xbl.elf", - .edl = "qcom/sdx65m/edl.mbn", - .config = &modem_qcom_v1_mhiv_config, - .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32, - .sideband_wake = false, -}; - -static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = { - .name = "qcom-sdx55m", - .fw = "qcom/sdx55m/sbl1.mbn", - .edl = "qcom/sdx55m/edl.mbn", - .config = &modem_qcom_v1_mhiv_config, - .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32, - .mru_default = 32768, - .sideband_wake = false, -}; - -static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = { - .name = "qcom-sdx24", - .edl = "qcom/prog_firehose_sdx24.mbn", - .config = &modem_qcom_v1_mhiv_config, - .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32, - .sideband_wake = true, -}; - -static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = { - MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0), - MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0), - MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), - MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0), - MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1), - MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1), - MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), - MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), - MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), - MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), - /* The EDL firmware is a flash-programmer exposing firehose protocol */ - MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), - MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), - MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), - MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), -}; - -static struct mhi_event_config mhi_quectel_em1xx_events[] = { - MHI_EVENT_CONFIG_CTRL(0, 128), - MHI_EVENT_CONFIG_DATA(1, 128), - MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), - MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101) -}; - -static const struct mhi_controller_config modem_quectel_em1xx_config = { - .max_channels = 128, - .timeout_ms = 20000, - .num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels), - .ch_cfg = mhi_quectel_em1xx_channels, - .num_events = ARRAY_SIZE(mhi_quectel_em1xx_events), - .event_cfg = mhi_quectel_em1xx_events, -}; - -static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = { - .name = 
"quectel-em1xx", - .edl = "qcom/prog_firehose_sdx24.mbn", - .config = &modem_quectel_em1xx_config, - .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32, - .mru_default = 32768, - .sideband_wake = true, -}; - -static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = { - MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0), - MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0), - MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1), - MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1), - MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), - MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), - MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), - MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), - MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), - MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), -}; - -static struct mhi_event_config mhi_foxconn_sdx55_events[] = { - MHI_EVENT_CONFIG_CTRL(0, 128), - MHI_EVENT_CONFIG_DATA(1, 128), - MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), - MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101) -}; - -static const struct mhi_controller_config modem_foxconn_sdx55_config = { - .max_channels = 128, - .timeout_ms = 20000, - .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels), - .ch_cfg = mhi_foxconn_sdx55_channels, - .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events), - .event_cfg = mhi_foxconn_sdx55_events, -}; - -static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { - .name = "foxconn-sdx55", - .fw = "qcom/sdx55m/sbl1.mbn", - .edl = "qcom/sdx55m/edl.mbn", - .config = &modem_foxconn_sdx55_config, - .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32, - .mru_default = 32768, - .sideband_wake = false, -}; - -static const struct mhi_channel_config mhi_mv31_channels[] = { - MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0), - MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0), - /* MBIM Control Channel */ - MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0), - MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0), - /* MBIM Data Channel */ - MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2), - MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3), -}; - -static struct mhi_event_config mhi_mv31_events[] = { - MHI_EVENT_CONFIG_CTRL(0, 256), - MHI_EVENT_CONFIG_DATA(1, 256), - MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), - MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101), -}; - -static const struct mhi_controller_config modem_mv31_config = { - .max_channels = 128, - .timeout_ms = 20000, - .num_channels = ARRAY_SIZE(mhi_mv31_channels), - .ch_cfg = mhi_mv31_channels, - .num_events = ARRAY_SIZE(mhi_mv31_events), - .event_cfg = mhi_mv31_events, -}; - -static const struct mhi_pci_dev_info mhi_mv31_info = { - .name = "cinterion-mv31", - .config = &modem_mv31_config, - .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32, - .mru_default = 32768, -}; - -static const struct mhi_channel_config mhi_sierra_em919x_channels[] = { - MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), - MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0), - MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0), - MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0), - MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0), - MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0), - MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0), - MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0), - MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), - MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), - MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1), - MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2), -}; - -static struct mhi_event_config modem_sierra_em919x_mhi_events[] = { - /* first ring is control+data and DIAG ring */ - MHI_EVENT_CONFIG_CTRL(0, 2048), - /* Hardware channels request dedicated hardware 
event rings */ - MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100), - MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101) -}; - -static const struct mhi_controller_config modem_sierra_em919x_config = { - .max_channels = 128, - .timeout_ms = 24000, - .num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels), - .ch_cfg = mhi_sierra_em919x_channels, - .num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events), - .event_cfg = modem_sierra_em919x_mhi_events, -}; - -static const struct mhi_pci_dev_info mhi_sierra_em919x_info = { - .name = "sierra-em919x", - .config = &modem_sierra_em919x_config, - .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32, - .sideband_wake = false, -}; - -static const struct pci_device_id mhi_pci_id_table[] = { - /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */ - { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200), - .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info }, - { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306), - .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info }, - { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), - .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, - { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */ - .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, - { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */ - .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, - { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), - .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, - /* T99W175 (sdx55), Both for eSIM and Non-eSIM */ - { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab), - .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, - /* DW5930e (sdx55), With eSIM, It's also T99W175 */ - { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0), - .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, - /* DW5930e (sdx55), Non-eSIM, It's also T99W175 */ - { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1), - .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, - /* T99W175 (sdx55), Based on Qualcomm new baseline */ - { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf), - .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, - /* MV31-W (Cinterion) */ - { PCI_DEVICE(0x1269, 0x00b3), - .driver_data = (kernel_ulong_t) &mhi_mv31_info }, - { } -}; -MODULE_DEVICE_TABLE(pci, mhi_pci_id_table); - -enum mhi_pci_device_status { - MHI_PCI_DEV_STARTED, - MHI_PCI_DEV_SUSPENDED, -}; - -struct mhi_pci_device { - struct mhi_controller mhi_cntrl; - struct pci_saved_state *pci_state; - struct work_struct recovery_work; - struct timer_list health_check_timer; - unsigned long status; -}; - -static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl, - void __iomem *addr, u32 *out) -{ - *out = readl(addr); - return 0; -} - -static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl, - void __iomem *addr, u32 val) -{ - writel(val, addr); -} - -static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl, - enum mhi_callback cb) -{ - struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); - - /* Nothing to do for now */ - switch (cb) { - case MHI_CB_FATAL_ERROR: - case MHI_CB_SYS_ERROR: - dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb); - pm_runtime_forbid(&pdev->dev); - break; - case MHI_CB_EE_MISSION_MODE: - pm_runtime_allow(&pdev->dev); - break; - default: - break; - } -} - -static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force) -{ - /* no-op */ -} - -static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override) -{ - /* no-op */ -} - -static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl) -{ - /* no-op */ 
-} - -static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl) -{ - struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); - u16 vendor = 0; - - if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor)) - return false; - - if (vendor == (u16) ~0 || vendor == 0) - return false; - - return true; -} - -static int mhi_pci_claim(struct mhi_controller *mhi_cntrl, - unsigned int bar_num, u64 dma_mask) -{ - struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); - int err; - - err = pci_assign_resource(pdev, bar_num); - if (err) - return err; - - err = pcim_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "failed to enable pci device: %d\n", err); - return err; - } - - err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev)); - if (err) { - dev_err(&pdev->dev, "failed to map pci region: %d\n", err); - return err; - } - mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num]; - mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num); - - err = dma_set_mask_and_coherent(&pdev->dev, dma_mask); - if (err) { - dev_err(&pdev->dev, "Cannot set proper DMA mask\n"); - return err; - } - - pci_set_master(pdev); - - return 0; -} - -static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl, - const struct mhi_controller_config *mhi_cntrl_config) -{ - struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); - int nr_vectors, i; - int *irq; - - /* - * Alloc one MSI vector for BHI + one vector per event ring, ideally... - * No explicit pci_free_irq_vectors required, done by pcim_release. - */ - mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events; - - nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI); - if (nr_vectors < 0) { - dev_err(&pdev->dev, "Error allocating MSI vectors %d\n", - nr_vectors); - return nr_vectors; - } - - if (nr_vectors < mhi_cntrl->nr_irqs) { - dev_warn(&pdev->dev, "using shared MSI\n"); - - /* Patch msi vectors, use only one (shared) */ - for (i = 0; i < mhi_cntrl_config->num_events; i++) - mhi_cntrl_config->event_cfg[i].irq = 0; - mhi_cntrl->nr_irqs = 1; - } - - irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL); - if (!irq) - return -ENOMEM; - - for (i = 0; i < mhi_cntrl->nr_irqs; i++) { - int vector = i >= nr_vectors ? (nr_vectors - 1) : i; - - irq[i] = pci_irq_vector(pdev, vector); - } - - mhi_cntrl->irq = irq; - - return 0; -} - -static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl) -{ - /* The runtime_get() MHI callback means: - * Do whatever is requested to leave M3. - */ - return pm_runtime_get(mhi_cntrl->cntrl_dev); -} - -static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl) -{ - /* The runtime_put() MHI callback means: - * Device can be moved in M3 state. 
- */ - pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); - pm_runtime_put(mhi_cntrl->cntrl_dev); -} - -static void mhi_pci_recovery_work(struct work_struct *work) -{ - struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device, - recovery_work); - struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; - struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); - int err; - - dev_warn(&pdev->dev, "device recovery started\n"); - - del_timer(&mhi_pdev->health_check_timer); - pm_runtime_forbid(&pdev->dev); - - /* Clean up MHI state */ - if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { - mhi_power_down(mhi_cntrl, false); - mhi_unprepare_after_power_down(mhi_cntrl); - } - - pci_set_power_state(pdev, PCI_D0); - pci_load_saved_state(pdev, mhi_pdev->pci_state); - pci_restore_state(pdev); - - if (!mhi_pci_is_alive(mhi_cntrl)) - goto err_try_reset; - - err = mhi_prepare_for_power_up(mhi_cntrl); - if (err) - goto err_try_reset; - - err = mhi_sync_power_up(mhi_cntrl); - if (err) - goto err_unprepare; - - dev_dbg(&pdev->dev, "Recovery completed\n"); - - set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); - mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); - return; - -err_unprepare: - mhi_unprepare_after_power_down(mhi_cntrl); -err_try_reset: - if (pci_reset_function(pdev)) - dev_err(&pdev->dev, "Recovery failed\n"); -} - -static void health_check(struct timer_list *t) -{ - struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer); - struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; - - if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || - test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) - return; - - if (!mhi_pci_is_alive(mhi_cntrl)) { - dev_err(mhi_cntrl->cntrl_dev, "Device died\n"); - queue_work(system_long_wq, &mhi_pdev->recovery_work); - return; - } - - /* reschedule in two seconds */ - mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); -} - -static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data; - const struct mhi_controller_config *mhi_cntrl_config; - struct mhi_pci_device *mhi_pdev; - struct mhi_controller *mhi_cntrl; - int err; - - dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name); - - /* mhi_pdev.mhi_cntrl must be zero-initialized */ - mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL); - if (!mhi_pdev) - return -ENOMEM; - - INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work); - timer_setup(&mhi_pdev->health_check_timer, health_check, 0); - - mhi_cntrl_config = info->config; - mhi_cntrl = &mhi_pdev->mhi_cntrl; - - mhi_cntrl->cntrl_dev = &pdev->dev; - mhi_cntrl->iova_start = 0; - mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width); - mhi_cntrl->fw_image = info->fw; - mhi_cntrl->edl_image = info->edl; - - mhi_cntrl->read_reg = mhi_pci_read_reg; - mhi_cntrl->write_reg = mhi_pci_write_reg; - mhi_cntrl->status_cb = mhi_pci_status_cb; - mhi_cntrl->runtime_get = mhi_pci_runtime_get; - mhi_cntrl->runtime_put = mhi_pci_runtime_put; - mhi_cntrl->mru = info->mru_default; - - if (info->sideband_wake) { - mhi_cntrl->wake_get = mhi_pci_wake_get_nop; - mhi_cntrl->wake_put = mhi_pci_wake_put_nop; - mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop; - } - - err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width)); - if (err) - return err; - - err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config); - if (err) - return 
err; - - pci_set_drvdata(pdev, mhi_pdev); - - /* Have stored pci confspace at hand for restore in sudden PCI error. - * cache the state locally and discard the PCI core one. - */ - pci_save_state(pdev); - mhi_pdev->pci_state = pci_store_saved_state(pdev); - pci_load_saved_state(pdev, NULL); - - pci_enable_pcie_error_reporting(pdev); - - err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config); - if (err) - goto err_disable_reporting; - - /* MHI bus does not power up the controller by default */ - err = mhi_prepare_for_power_up(mhi_cntrl); - if (err) { - dev_err(&pdev->dev, "failed to prepare MHI controller\n"); - goto err_unregister; - } - - err = mhi_sync_power_up(mhi_cntrl); - if (err) { - dev_err(&pdev->dev, "failed to power up MHI controller\n"); - goto err_unprepare; - } - - set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); - - /* start health check */ - mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); - - /* Only allow runtime-suspend if PME capable (for wakeup) */ - if (pci_pme_capable(pdev, PCI_D3hot)) { - pm_runtime_set_autosuspend_delay(&pdev->dev, 2000); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_mark_last_busy(&pdev->dev); - pm_runtime_put_noidle(&pdev->dev); - } - - return 0; - -err_unprepare: - mhi_unprepare_after_power_down(mhi_cntrl); -err_unregister: - mhi_unregister_controller(mhi_cntrl); -err_disable_reporting: - pci_disable_pcie_error_reporting(pdev); - - return err; -} - -static void mhi_pci_remove(struct pci_dev *pdev) -{ - struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); - struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; - - del_timer_sync(&mhi_pdev->health_check_timer); - cancel_work_sync(&mhi_pdev->recovery_work); - - if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { - mhi_power_down(mhi_cntrl, true); - mhi_unprepare_after_power_down(mhi_cntrl); - } - - /* balancing probe put_noidle */ - if (pci_pme_capable(pdev, PCI_D3hot)) - pm_runtime_get_noresume(&pdev->dev); - - mhi_unregister_controller(mhi_cntrl); - pci_disable_pcie_error_reporting(pdev); -} - -static void mhi_pci_shutdown(struct pci_dev *pdev) -{ - mhi_pci_remove(pdev); - pci_set_power_state(pdev, PCI_D3hot); -} - -static void mhi_pci_reset_prepare(struct pci_dev *pdev) -{ - struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); - struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; - - dev_info(&pdev->dev, "reset\n"); - - del_timer(&mhi_pdev->health_check_timer); - - /* Clean up MHI state */ - if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { - mhi_power_down(mhi_cntrl, false); - mhi_unprepare_after_power_down(mhi_cntrl); - } - - /* cause internal device reset */ - mhi_soc_reset(mhi_cntrl); - - /* Be sure device reset has been executed */ - msleep(MHI_POST_RESET_DELAY_MS); -} - -static void mhi_pci_reset_done(struct pci_dev *pdev) -{ - struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); - struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; - int err; - - /* Restore initial known working PCI state */ - pci_load_saved_state(pdev, mhi_pdev->pci_state); - pci_restore_state(pdev); - - /* Is device status available ? 
*/ - if (!mhi_pci_is_alive(mhi_cntrl)) { - dev_err(&pdev->dev, "reset failed\n"); - return; - } - - err = mhi_prepare_for_power_up(mhi_cntrl); - if (err) { - dev_err(&pdev->dev, "failed to prepare MHI controller\n"); - return; - } - - err = mhi_sync_power_up(mhi_cntrl); - if (err) { - dev_err(&pdev->dev, "failed to power up MHI controller\n"); - mhi_unprepare_after_power_down(mhi_cntrl); - return; - } - - set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); - mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); -} - -static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); - struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; - - dev_err(&pdev->dev, "PCI error detected, state = %u\n", state); - - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - /* Clean up MHI state */ - if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { - mhi_power_down(mhi_cntrl, false); - mhi_unprepare_after_power_down(mhi_cntrl); - } else { - /* Nothing to do */ - return PCI_ERS_RESULT_RECOVERED; - } - - pci_disable_device(pdev); - - return PCI_ERS_RESULT_NEED_RESET; -} - -static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev) -{ - if (pci_enable_device(pdev)) { - dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); - return PCI_ERS_RESULT_DISCONNECT; - } - - return PCI_ERS_RESULT_RECOVERED; -} - -static void mhi_pci_io_resume(struct pci_dev *pdev) -{ - struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); - - dev_err(&pdev->dev, "PCI slot reset done\n"); - - queue_work(system_long_wq, &mhi_pdev->recovery_work); -} - -static const struct pci_error_handlers mhi_pci_err_handler = { - .error_detected = mhi_pci_error_detected, - .slot_reset = mhi_pci_slot_reset, - .resume = mhi_pci_io_resume, - .reset_prepare = mhi_pci_reset_prepare, - .reset_done = mhi_pci_reset_done, -}; - -static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); - struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; - int err; - - if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) - return 0; - - del_timer(&mhi_pdev->health_check_timer); - cancel_work_sync(&mhi_pdev->recovery_work); - - if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || - mhi_cntrl->ee != MHI_EE_AMSS) - goto pci_suspend; /* Nothing to do at MHI level */ - - /* Transition to M3 state */ - err = mhi_pm_suspend(mhi_cntrl); - if (err) { - dev_err(&pdev->dev, "failed to suspend device: %d\n", err); - clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status); - return -EBUSY; - } - -pci_suspend: - pci_disable_device(pdev); - pci_wake_from_d3(pdev, true); - - return 0; -} - -static int __maybe_unused mhi_pci_runtime_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); - struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; - int err; - - if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) - return 0; - - err = pci_enable_device(pdev); - if (err) - goto err_recovery; - - pci_set_master(pdev); - pci_wake_from_d3(pdev, false); - - if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || - mhi_cntrl->ee != MHI_EE_AMSS) - return 0; /* Nothing to do at MHI level */ - - /* Exit M3, transition to M0 state */ - err = mhi_pm_resume(mhi_cntrl); - if (err) { - dev_err(&pdev->dev, 
"failed to resume device: %d\n", err); - goto err_recovery; - } - - /* Resume health check */ - mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); - - /* It can be a remote wakeup (no mhi runtime_get), update access time */ - pm_runtime_mark_last_busy(dev); - - return 0; - -err_recovery: - /* Do not fail to not mess up our PCI device state, the device likely - * lost power (d3cold) and we simply need to reset it from the recovery - * procedure, trigger the recovery asynchronously to prevent system - * suspend exit delaying. - */ - queue_work(system_long_wq, &mhi_pdev->recovery_work); - pm_runtime_mark_last_busy(dev); - - return 0; -} - -static int __maybe_unused mhi_pci_suspend(struct device *dev) -{ - pm_runtime_disable(dev); - return mhi_pci_runtime_suspend(dev); -} - -static int __maybe_unused mhi_pci_resume(struct device *dev) -{ - int ret; - - /* Depending the platform, device may have lost power (d3cold), we need - * to resume it now to check its state and recover when necessary. - */ - ret = mhi_pci_runtime_resume(dev); - pm_runtime_enable(dev); - - return ret; -} - -static int __maybe_unused mhi_pci_freeze(struct device *dev) -{ - struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); - struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; - - /* We want to stop all operations, hibernation does not guarantee that - * device will be in the same state as before freezing, especially if - * the intermediate restore kernel reinitializes MHI device with new - * context. - */ - if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { - mhi_power_down(mhi_cntrl, true); - mhi_unprepare_after_power_down(mhi_cntrl); - } - - return 0; -} - -static int __maybe_unused mhi_pci_restore(struct device *dev) -{ - struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); - - /* Reinitialize the device */ - queue_work(system_long_wq, &mhi_pdev->recovery_work); - - return 0; -} - -static const struct dev_pm_ops mhi_pci_pm_ops = { - SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL) -#ifdef CONFIG_PM_SLEEP - .suspend = mhi_pci_suspend, - .resume = mhi_pci_resume, - .freeze = mhi_pci_freeze, - .thaw = mhi_pci_restore, - .restore = mhi_pci_restore, -#endif -}; - -static struct pci_driver mhi_pci_driver = { - .name = "mhi-pci-generic", - .id_table = mhi_pci_id_table, - .probe = mhi_pci_probe, - .remove = mhi_pci_remove, - .shutdown = mhi_pci_shutdown, - .err_handler = &mhi_pci_err_handler, - .driver.pm = &mhi_pci_pm_ops -}; -module_pci_driver(mhi_pci_driver); - -MODULE_AUTHOR("Loic Poulain "); -MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver"); -MODULE_LICENSE("GPL"); -- cgit v1.2.3