author    Linus Torvalds <torvalds@linux-foundation.org>  2024-03-21 22:35:20 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-03-21 22:35:20 +0300
commit    e09bf86f3d53ecf4da61163d88036c4c16419d70 (patch)
tree      48993f46e882041015edb54c0f9ef7e7f98bd5e0 /drivers
parent    2ac2b1665d3fbec6ca709dd6ef3ea05f4a51ee4c (diff)
parent    a788e53c05aee6e3d60792a59e10c0fac56b5086 (diff)
download  linux-e09bf86f3d53ecf4da61163d88036c4c16419d70.tar.xz
Merge tag 'usb-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb

Pull USB / Thunderbolt updates from Greg KH:
 "Here is the big set of USB and Thunderbolt changes for 6.9-rc1. Lots
  of tiny changes and forward progress to support new hardware and
  better support for existing devices.

  Included in here are:

   - Thunderbolt (i.e. USB4) updates for newer hardware and uses as
     more people start to use the hardware

   - default USB authentication mode Kconfig and documentation update
     to make it more obvious what is going on

   - USB typec updates and enhancements

   - usual dwc3 driver updates

   - usual xhci driver updates

   - function USB (i.e. gadget) driver updates and additions

   - new device ids for lots of drivers

   - loads of other small updates, full details in the shortlog

  All of these, including a "last minute regression fix" have been in
  linux-next with no reported issues"

* tag 'usb-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb: (185 commits)
  usb: usb-acpi: Fix oops due to freeing uninitialized pld pointer
  usb: gadget: net2272: Use irqflags in the call to net2272_probe_fin
  usb: gadget: tegra-xudc: Fix USB3 PHY retrieval logic
  phy: tegra: xusb: Add API to retrieve the port number of phy
  USB: gadget: pxa27x_udc: Remove unused of_gpio.h
  usb: gadget/snps_udc_plat: Remove unused of_gpio.h
  usb: ohci-pxa27x: Remove unused of_gpio.h
  usb: sl811-hcd: only defined function checkdone if QUIRK2 is defined
  usb: Clarify expected behavior of dev_bin_attrs_are_visible()
  xhci: Allow RPM on the USB controller (1022:43f7) by default
  usb: isp1760: remove SLAB_MEM_SPREAD flag usage
  usb: misc: onboard_hub: use pointer consistently in the probe function
  usb: gadget: fsl: Increase size of name buffer for endpoints
  usb: gadget: fsl: Add of device table to enable module autoloading
  usb: typec: tcpm: add support to set tcpc connector orientatition
  usb: typec: tcpci: add generic tcpci fallback compatible
  dt-bindings: usb: typec-tcpci: add tcpci fallback binding
  usb: gadget: fsl-udc: Replace custom log wrappers by dev_{err,warn,dbg,vdbg}
  usb: core: Set connect_type of ports based on DT node
  dt-bindings: usb: Add downstream facing ports to realtek binding
  ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/phy-core.c47
-rw-r--r--drivers/phy/realtek/Kconfig32
-rw-r--r--drivers/phy/realtek/Makefile3
-rw-r--r--drivers/phy/realtek/phy-rtk-usb2.c1312
-rw-r--r--drivers/phy/realtek/phy-rtk-usb3.c748
-rw-r--r--drivers/phy/tegra/xusb.c13
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c19
-rw-r--r--drivers/thunderbolt/Makefile1
-rw-r--r--drivers/thunderbolt/ctl.c19
-rw-r--r--drivers/thunderbolt/ctl.h4
-rw-r--r--drivers/thunderbolt/domain.c19
-rw-r--r--drivers/thunderbolt/icm.c2
-rw-r--r--drivers/thunderbolt/lc.c45
-rw-r--r--drivers/thunderbolt/nhi.c11
-rw-r--r--drivers/thunderbolt/nvm.c4
-rw-r--r--drivers/thunderbolt/path.c13
-rw-r--r--drivers/thunderbolt/quirks.c14
-rw-r--r--drivers/thunderbolt/retimer.c2
-rw-r--r--drivers/thunderbolt/switch.c140
-rw-r--r--drivers/thunderbolt/tb.c890
-rw-r--r--drivers/thunderbolt/tb.h29
-rw-r--r--drivers/thunderbolt/tb_regs.h6
-rw-r--r--drivers/thunderbolt/trace.h188
-rw-r--r--drivers/thunderbolt/tunnel.c96
-rw-r--r--drivers/thunderbolt/tunnel.h6
-rw-r--r--drivers/thunderbolt/usb4.c43
-rw-r--r--drivers/thunderbolt/usb4_port.c2
-rw-r--r--drivers/thunderbolt/xdomain.c16
-rw-r--r--drivers/usb/cdns3/drd.c2
-rw-r--r--drivers/usb/core/Kconfig27
-rw-r--r--drivers/usb/core/driver.c8
-rw-r--r--drivers/usb/core/endpoint.c2
-rw-r--r--drivers/usb/core/hcd.c20
-rw-r--r--drivers/usb/core/hub.c29
-rw-r--r--drivers/usb/core/message.c7
-rw-r--r--drivers/usb/core/of.c71
-rw-r--r--drivers/usb/core/phy.c120
-rw-r--r--drivers/usb/core/phy.h3
-rw-r--r--drivers/usb/core/port.c4
-rw-r--r--drivers/usb/core/sysfs.c103
-rw-r--r--drivers/usb/core/usb-acpi.c46
-rw-r--r--drivers/usb/core/usb.c2
-rw-r--r--drivers/usb/core/usb.h8
-rw-r--r--drivers/usb/dwc3/Kconfig2
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c42
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c4
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c276
-rw-r--r--drivers/usb/dwc3/ep0.c1
-rw-r--r--drivers/usb/dwc3/gadget.c91
-rw-r--r--drivers/usb/dwc3/gadget.h1
-rw-r--r--drivers/usb/dwc3/host.c50
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/function/f_fs.c533
-rw-r--r--drivers/usb/gadget/function/u_ether.c2
-rw-r--r--drivers/usb/gadget/function/uvc_video.c115
-rw-r--r--drivers/usb/gadget/udc/core.c7
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c127
-rw-r--r--drivers/usb/gadget/udc/fsl_usb2_udc.h47
-rw-r--r--drivers/usb/gadget/udc/net2272.c2
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c1
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c1
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c39
-rw-r--r--drivers/usb/host/ehci-orion.c18
-rw-r--r--drivers/usb/host/ohci-pxa27x.c1
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/host/xhci-caps.h85
-rw-r--r--drivers/usb/host/xhci-dbgcap.c13
-rw-r--r--drivers/usb/host/xhci-dbgcap.h2
-rw-r--r--drivers/usb/host/xhci-hub.c69
-rw-r--r--drivers/usb/host/xhci-mem.c95
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c14
-rw-r--r--drivers/usb/host/xhci-pci.c15
-rw-r--r--drivers/usb/host/xhci-port.h176
-rw-r--r--drivers/usb/host/xhci-ring.c227
-rw-r--r--drivers/usb/host/xhci-trace.h12
-rw-r--r--drivers/usb/host/xhci.c56
-rw-r--r--drivers/usb/host/xhci.h272
-rw-r--r--drivers/usb/image/mdc800.c1
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c4
-rw-r--r--drivers/usb/misc/onboard_usb_hub.h7
-rw-r--r--drivers/usb/mtu3/mtu3_host.c30
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/phy/phy-generic.c55
-rw-r--r--drivers/usb/phy/phy.c2
-rw-r--r--drivers/usb/roles/class.c43
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/keyspan.c1
-rw-r--r--drivers/usb/serial/option.c6
-rw-r--r--drivers/usb/serial/oti6858.c1
-rw-r--r--drivers/usb/storage/freecom.c1
-rw-r--r--drivers/usb/storage/sddr55.c4
-rw-r--r--drivers/usb/typec/altmodes/displayport.c165
-rw-r--r--drivers/usb/typec/bus.c102
-rw-r--r--drivers/usb/typec/class.c61
-rw-r--r--drivers/usb/typec/class.h7
-rw-r--r--drivers/usb/typec/mux.c2
-rw-r--r--drivers/usb/typec/mux/Kconfig10
-rw-r--r--drivers/usb/typec/mux/Makefile1
-rw-r--r--drivers/usb/typec/mux/it5205.c294
-rw-r--r--drivers/usb/typec/pd.c30
-rw-r--r--drivers/usb/typec/retimer.c2
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c2
-rw-r--r--drivers/usb/typec/tcpm/qcom/Makefile3
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c254
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.h27
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c159
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h94
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c80
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c290
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h172
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c27
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.h1
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim_core.c38
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c1052
-rw-r--r--drivers/usb/typec/tcpm/wcove.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c294
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h107
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c92
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c1
124 files changed, 7775 insertions, 2346 deletions
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 4cef568231bf..787354b849c7 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -87,6 +87,7 @@ source "drivers/phy/motorola/Kconfig"
source "drivers/phy/mscc/Kconfig"
source "drivers/phy/qualcomm/Kconfig"
source "drivers/phy/ralink/Kconfig"
+source "drivers/phy/realtek/Kconfig"
source "drivers/phy/renesas/Kconfig"
source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index fb3dc9de6111..868a220ed0f6 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -26,6 +26,7 @@ obj-y += allwinner/ \
mscc/ \
qualcomm/ \
ralink/ \
+ realtek/ \
renesas/ \
rockchip/ \
samsung/ \
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 7f9b4de772ee..c5c8d70bc853 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -490,6 +490,53 @@ int phy_calibrate(struct phy *phy)
EXPORT_SYMBOL_GPL(phy_calibrate);
/**
+ * phy_notify_connect() - phy connect notification
+ * @phy: the phy returned by phy_get()
+ * @port: the port index for connect
+ *
+ * This notifies the phy that a device has been connected to the port; if the
+ * phy implements a ->connect() callback, it is invoked with the port index.
+ *
+ * Returns: %0 if successful, a negative error code otherwise
+ */
+int phy_notify_connect(struct phy *phy, int port)
+{
+ int ret;
+
+ if (!phy || !phy->ops->connect)
+ return 0;
+
+ mutex_lock(&phy->mutex);
+ ret = phy->ops->connect(phy, port);
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_notify_connect);
+
+/**
+ * phy_notify_disconnect() - phy disconnect notification
+ * @phy: the phy returned by phy_get()
+ * @port: the port index for disconnect
+ *
+ * This notifies the phy that a device has been disconnected from the port; if
+ * the phy implements a ->disconnect() callback, it is invoked with the port
+ * index.
+ *
+ * Returns: %0 if successful, a negative error code otherwise
+ */
+int phy_notify_disconnect(struct phy *phy, int port)
+{
+ int ret;
+
+ if (!phy || !phy->ops->disconnect)
+ return 0;
+
+ mutex_lock(&phy->mutex);
+ ret = phy->ops->disconnect(phy, port);
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_notify_disconnect);
+
+/**
* phy_configure() - Changes the phy parameters
* @phy: the phy returned by phy_get()
* @opts: New configuration to apply
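
The phy-core.c hunk above adds phy_notify_connect()/phy_notify_disconnect(), which host-side code can call when a device appears on or leaves a root-hub port (judging from the diffstat, the USB core's roothub PHY handling in drivers/usb/core/phy.c and hub.c is the in-tree caller). A minimal sketch of a hypothetical consumer follows; the "usb2-phy" phandle name and the two example_* functions are illustrative only, while the phy_* calls are the real generic-PHY API:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

/* Hypothetical host-glue driver: forward root-hub port events to the PHY. */
static int example_host_probe(struct device *dev)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_get(dev, "usb2-phy");	/* phandle name is illustrative */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);
	if (ret)
		return ret;

	dev_set_drvdata(dev, phy);
	return 0;
}

/* Called from a (hypothetical) hub event handler with a 0-based port index. */
static void example_port_event(struct device *dev, int port, bool connected)
{
	struct phy *phy = dev_get_drvdata(dev);

	if (connected)
		phy_notify_connect(phy, port);
	else
		phy_notify_disconnect(phy, port);
}
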
diff --git a/drivers/phy/realtek/Kconfig b/drivers/phy/realtek/Kconfig
new file mode 100644
index 000000000000..75ac7e7c31ae
--- /dev/null
+++ b/drivers/phy/realtek/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Phy drivers for Realtek platforms
+#
+
+if ARCH_REALTEK || COMPILE_TEST
+
+config PHY_RTK_RTD_USB2PHY
+ tristate "Realtek RTD USB2 PHY Transceiver Driver"
+ depends on USB_SUPPORT
+ select GENERIC_PHY
+ select USB_PHY
+ select USB_COMMON
+ help
+ Enable this to support Realtek SoC USB2 phy transceiver.
+ The DHC (digital home center) RTD series SoCs used the Synopsys
+ DWC3 USB IP. This driver will do the PHY initialization
+ of the parameters.
+
+config PHY_RTK_RTD_USB3PHY
+ tristate "Realtek RTD USB3 PHY Transceiver Driver"
+ depends on USB_SUPPORT
+ select GENERIC_PHY
+ select USB_PHY
+ select USB_COMMON
+ help
+ Enable this to support Realtek SoC USB3 phy transceiver.
+ The DHC (digital home center) RTD series SoCs used the Synopsys
+ DWC3 USB IP. This driver will do the PHY initialization
+ of the parameters.
+
+endif # ARCH_REALTEK || COMPILE_TEST
diff --git a/drivers/phy/realtek/Makefile b/drivers/phy/realtek/Makefile
new file mode 100644
index 000000000000..ed7b47ff8a26
--- /dev/null
+++ b/drivers/phy/realtek/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_RTK_RTD_USB2PHY) += phy-rtk-usb2.o
+obj-$(CONFIG_PHY_RTK_RTD_USB3PHY) += phy-rtk-usb3.o
diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c
new file mode 100644
index 000000000000..e3ad7cea5109
--- /dev/null
+++ b/drivers/phy/realtek/phy-rtk-usb2.c
@@ -0,0 +1,1312 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-rtk-usb2.c RTK usb2.0 PHY driver
+ *
+ * Copyright (C) 2023 Realtek Semiconductor Corporation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/regmap.h>
+#include <linux/sys_soc.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+#include <linux/usb.h>
+
+/* GUSB2PHYACCn register */
+#define PHY_NEW_REG_REQ BIT(25)
+#define PHY_VSTS_BUSY BIT(23)
+#define PHY_VCTRL_SHIFT 8
+#define PHY_REG_DATA_MASK 0xff
+
+#define GET_LOW_NIBBLE(addr) ((addr) & 0x0f)
+#define GET_HIGH_NIBBLE(addr) (((addr) & 0xf0) >> 4)
+
+#define EFUS_USB_DC_CAL_RATE 2
+#define EFUS_USB_DC_CAL_MAX 7
+
+#define EFUS_USB_DC_DIS_RATE 1
+#define EFUS_USB_DC_DIS_MAX 7
+
+#define MAX_PHY_DATA_SIZE 20
+#define OFFEST_PHY_READ 0x20
+
+#define MAX_USB_PHY_NUM 4
+#define MAX_USB_PHY_PAGE0_DATA_SIZE 16
+#define MAX_USB_PHY_PAGE1_DATA_SIZE 16
+#define MAX_USB_PHY_PAGE2_DATA_SIZE 8
+
+#define SET_PAGE_OFFSET 0xf4
+#define SET_PAGE_0 0x9b
+#define SET_PAGE_1 0xbb
+#define SET_PAGE_2 0xdb
+
+#define PAGE_START 0xe0
+#define PAGE0_0XE4 0xe4
+#define PAGE0_0XE6 0xe6
+#define PAGE0_0XE7 0xe7
+#define PAGE1_0XE0 0xe0
+#define PAGE1_0XE2 0xe2
+
+#define SENSITIVITY_CTRL (BIT(4) | BIT(5) | BIT(6))
+#define ENABLE_AUTO_SENSITIVITY_CALIBRATION BIT(2)
+#define DEFAULT_DC_DRIVING_VALUE (0x8)
+#define DEFAULT_DC_DISCONNECTION_VALUE (0x6)
+#define HS_CLK_SELECT BIT(6)
+
+struct phy_reg {
+ void __iomem *reg_wrap_vstatus;
+ void __iomem *reg_gusb2phyacc0;
+ int vstatus_index;
+};
+
+struct phy_data {
+ u8 addr;
+ u8 data;
+};
+
+struct phy_cfg {
+ int page0_size;
+ struct phy_data page0[MAX_USB_PHY_PAGE0_DATA_SIZE];
+ int page1_size;
+ struct phy_data page1[MAX_USB_PHY_PAGE1_DATA_SIZE];
+ int page2_size;
+ struct phy_data page2[MAX_USB_PHY_PAGE2_DATA_SIZE];
+
+ int num_phy;
+
+ bool check_efuse;
+ int check_efuse_version;
+#define CHECK_EFUSE_V1 1
+#define CHECK_EFUSE_V2 2
+ int efuse_dc_driving_rate;
+ int efuse_dc_disconnect_rate;
+ int dc_driving_mask;
+ int dc_disconnect_mask;
+ bool usb_dc_disconnect_at_page0;
+ int driving_updated_for_dev_dis;
+
+ bool do_toggle;
+ bool do_toggle_driving;
+ bool use_default_parameter;
+ bool is_double_sensitivity_mode;
+};
+
+struct phy_parameter {
+ struct phy_reg phy_reg;
+
+ /* Get from efuse */
+ s8 efuse_usb_dc_cal;
+ s8 efuse_usb_dc_dis;
+
+ /* Get from dts */
+ bool inverse_hstx_sync_clock;
+ u32 driving_level;
+ s32 driving_level_compensate;
+ s32 disconnection_compensate;
+};
+
+struct rtk_phy {
+ struct device *dev;
+
+ struct phy_cfg *phy_cfg;
+ int num_phy;
+ struct phy_parameter *phy_parameter;
+
+ struct dentry *debug_dir;
+};
+
+/* mapping 0xE0 to 0 ... 0xE7 to 7, 0xF0 to 8 ... 0xF7 to 15 */
+static inline int page_addr_to_array_index(u8 addr)
+{
+ return (int)((((addr) - PAGE_START) & 0x7) +
+ ((((addr) - PAGE_START) & 0x10) >> 1));
+}
+
+static inline u8 array_index_to_page_addr(int index)
+{
+ return ((((index) + PAGE_START) & 0x7) +
+ ((((index) & 0x8) << 1) + PAGE_START));
+}
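+
+/*
+ * Worked examples of the two mappings above: page address 0xe4 maps to
+ * array index 4 and 0xf5 maps to index 13; array_index_to_page_addr()
+ * is the exact inverse, so index 13 maps back to 0xf5.
+ */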
+
+#define PHY_IO_TIMEOUT_USEC (50000)
+#define PHY_IO_DELAY_US (100)
+
+static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
+{
+ int ret;
+ unsigned int val;
+
+ ret = read_poll_timeout(readl, val, ((val & mask) == result),
+ PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
+ if (ret) {
+ pr_err("%s can't program USB phy\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static char rtk_phy_read(struct phy_reg *phy_reg, char addr)
+{
+ void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
+ unsigned int val;
+ int ret = 0;
+
+ addr -= OFFEST_PHY_READ;
+
+ /* polling until VBusy == 0 */
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return (char)ret;
+
+ /* VCtrl = low nibble of addr, and set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return (char)ret;
+
+ /* VCtrl = high nibble of addr, and set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return (char)ret;
+
+ val = readl(reg_gusb2phyacc0);
+
+ return (char)(val & PHY_REG_DATA_MASK);
+}
+
+static int rtk_phy_write(struct phy_reg *phy_reg, char addr, char data)
+{
+ unsigned int val;
+ void __iomem *reg_wrap_vstatus = phy_reg->reg_wrap_vstatus;
+ void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
+ int shift_bits = phy_reg->vstatus_index * 8;
+ int ret = 0;
+
+ /* write data to VStatusOut2 (data output to phy) */
+ writel((u32)data << shift_bits, reg_wrap_vstatus);
+
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return ret;
+
+ /* VCtrl = low nibble of addr, set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return ret;
+
+ /* VCtrl = high nibble of addr, set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtk_phy_set_page(struct phy_reg *phy_reg, int page)
+{
+ switch (page) {
+ case 0:
+ return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_0);
+ case 1:
+ return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_1);
+ case 2:
+ return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_2);
+ default:
+ pr_err("%s error page=%d\n", __func__, page);
+ }
+
+ return -EINVAL;
+}
+
+static u8 __updated_dc_disconnect_level_page0_0xe4(struct phy_cfg *phy_cfg,
+ struct phy_parameter *phy_parameter, u8 data)
+{
+ u8 ret;
+ s32 val;
+ s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+ int offset = 4;
+
+ val = (s32)((data >> offset) & dc_disconnect_mask)
+ + phy_parameter->efuse_usb_dc_dis
+ + phy_parameter->disconnection_compensate;
+
+ if (val > dc_disconnect_mask)
+ val = dc_disconnect_mask;
+ else if (val < 0)
+ val = 0;
+
+ ret = (data & (~(dc_disconnect_mask << offset))) |
+ (val & dc_disconnect_mask) << offset;
+
+ return ret;
+}
+
+/* updated disconnect level at page0 */
+static void update_dc_disconnect_level_at_page0(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, bool update)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_data *phy_data_page;
+ struct phy_data *phy_data;
+ u8 addr, data;
+ int offset = 4;
+ s32 dc_disconnect_mask;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_reg = &phy_parameter->phy_reg;
+
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ i = page_addr_to_array_index(PAGE0_0XE4);
+ phy_data = phy_data_page + i;
+ if (!phy_data->addr) {
+ phy_data->addr = PAGE0_0XE4;
+ phy_data->data = rtk_phy_read(phy_reg, PAGE0_0XE4);
+ }
+
+ addr = phy_data->addr;
+ data = phy_data->data;
+ dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+
+ if (update)
+ data = __updated_dc_disconnect_level_page0_0xe4(phy_cfg, phy_parameter, data);
+ else
+ data = (data & ~(dc_disconnect_mask << offset)) |
+ (DEFAULT_DC_DISCONNECTION_VALUE << offset);
+
+ if (rtk_phy_write(phy_reg, addr, data))
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+}
+
+static u8 __updated_dc_disconnect_level_page1_0xe2(struct phy_cfg *phy_cfg,
+ struct phy_parameter *phy_parameter, u8 data)
+{
+ u8 ret;
+ s32 val;
+ s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ val = (s32)(data & dc_disconnect_mask)
+ + phy_parameter->efuse_usb_dc_dis
+ + phy_parameter->disconnection_compensate;
+ } else { /* for CHECK_EFUSE_V2 or no efuse */
+ if (phy_parameter->efuse_usb_dc_dis)
+ val = (s32)(phy_parameter->efuse_usb_dc_dis +
+ phy_parameter->disconnection_compensate);
+ else
+ val = (s32)((data & dc_disconnect_mask) +
+ phy_parameter->disconnection_compensate);
+ }
+
+ if (val > dc_disconnect_mask)
+ val = dc_disconnect_mask;
+ else if (val < 0)
+ val = 0;
+
+ ret = (data & (~dc_disconnect_mask)) | (val & dc_disconnect_mask);
+
+ return ret;
+}
+
+/* updated disconnect level at page1 */
+static void update_dc_disconnect_level_at_page1(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, bool update)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_data *phy_data_page;
+ struct phy_data *phy_data;
+ struct phy_reg *phy_reg;
+ u8 addr, data;
+ s32 dc_disconnect_mask;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_reg = &phy_parameter->phy_reg;
+
+ /* Set page 1 */
+ phy_data_page = phy_cfg->page1;
+ rtk_phy_set_page(phy_reg, 1);
+
+ i = page_addr_to_array_index(PAGE1_0XE2);
+ phy_data = phy_data_page + i;
+ if (!phy_data->addr) {
+ phy_data->addr = PAGE1_0XE2;
+ phy_data->data = rtk_phy_read(phy_reg, PAGE1_0XE2);
+ }
+
+ addr = phy_data->addr;
+ data = phy_data->data;
+ dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+
+ if (update)
+ data = __updated_dc_disconnect_level_page1_0xe2(phy_cfg, phy_parameter, data);
+ else
+ data = (data & ~dc_disconnect_mask) | DEFAULT_DC_DISCONNECTION_VALUE;
+
+ if (rtk_phy_write(phy_reg, addr, data))
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+}
+
+static void update_dc_disconnect_level(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, bool update)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+
+ if (phy_cfg->usb_dc_disconnect_at_page0)
+ update_dc_disconnect_level_at_page0(rtk_phy, phy_parameter, update);
+ else
+ update_dc_disconnect_level_at_page1(rtk_phy, phy_parameter, update);
+}
+
+static u8 __update_dc_driving_page0_0xe4(struct phy_cfg *phy_cfg,
+ struct phy_parameter *phy_parameter, u8 data)
+{
+ s32 driving_level_compensate = phy_parameter->driving_level_compensate;
+ s32 dc_driving_mask = phy_cfg->dc_driving_mask;
+ s32 val;
+ u8 ret;
+
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ val = (s32)(data & dc_driving_mask) + driving_level_compensate
+ + phy_parameter->efuse_usb_dc_cal;
+ } else { /* for CHECK_EFUSE_V2 or no efuse */
+ if (phy_parameter->efuse_usb_dc_cal)
+ val = (s32)((phy_parameter->efuse_usb_dc_cal & dc_driving_mask)
+ + driving_level_compensate);
+ else
+ val = (s32)(data & dc_driving_mask);
+ }
+
+ if (val > dc_driving_mask)
+ val = dc_driving_mask;
+ else if (val < 0)
+ val = 0;
+
+ ret = (data & (~dc_driving_mask)) | (val & dc_driving_mask);
+
+ return ret;
+}
+
+static void update_dc_driving_level(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+
+ phy_reg = &phy_parameter->phy_reg;
+ phy_cfg = rtk_phy->phy_cfg;
+ if (!phy_cfg->page0[4].addr) {
+ rtk_phy_set_page(phy_reg, 0);
+ phy_cfg->page0[4].addr = PAGE0_0XE4;
+ phy_cfg->page0[4].data = rtk_phy_read(phy_reg, PAGE0_0XE4);
+ }
+
+ if (phy_parameter->driving_level != DEFAULT_DC_DRIVING_VALUE) {
+ u32 dc_driving_mask;
+ u8 driving_level;
+ u8 data;
+
+ data = phy_cfg->page0[4].data;
+ dc_driving_mask = phy_cfg->dc_driving_mask;
+ driving_level = data & dc_driving_mask;
+
+ dev_dbg(rtk_phy->dev, "%s driving_level=%d => dts driving_level=%d\n",
+ __func__, driving_level, phy_parameter->driving_level);
+
+ phy_cfg->page0[4].data = (data & (~dc_driving_mask)) |
+ (phy_parameter->driving_level & dc_driving_mask);
+ }
+
+ phy_cfg->page0[4].data = __update_dc_driving_page0_0xe4(phy_cfg,
+ phy_parameter,
+ phy_cfg->page0[4].data);
+}
+
+static void update_hs_clk_select(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (phy_parameter->inverse_hstx_sync_clock) {
+ if (!phy_cfg->page0[6].addr) {
+ rtk_phy_set_page(phy_reg, 0);
+ phy_cfg->page0[6].addr = PAGE0_0XE6;
+ phy_cfg->page0[6].data = rtk_phy_read(phy_reg, PAGE0_0XE6);
+ }
+
+ phy_cfg->page0[6].data = phy_cfg->page0[6].data | HS_CLK_SELECT;
+ }
+}
+
+static void do_rtk_phy_toggle(struct rtk_phy *rtk_phy,
+ int index, bool connect)
+{
+ struct phy_parameter *phy_parameter;
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_data *phy_data_page;
+ u8 addr, data;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (!phy_cfg->do_toggle)
+ goto out;
+
+ if (phy_cfg->is_double_sensitivity_mode)
+ goto do_toggle_driving;
+
+ /* Set page 0 */
+ rtk_phy_set_page(phy_reg, 0);
+
+ addr = PAGE0_0XE7;
+ data = rtk_phy_read(phy_reg, addr);
+
+ if (connect)
+ rtk_phy_write(phy_reg, addr, data & (~SENSITIVITY_CTRL));
+ else
+ rtk_phy_write(phy_reg, addr, data | (SENSITIVITY_CTRL));
+
+do_toggle_driving:
+
+ if (!phy_cfg->do_toggle_driving)
+ goto do_toggle;
+
+ /* Page 0 addr 0xE4 driving capability */
+
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ i = page_addr_to_array_index(PAGE0_0XE4);
+ addr = phy_data_page[i].addr;
+ data = phy_data_page[i].data;
+
+ if (connect) {
+ rtk_phy_write(phy_reg, addr, data);
+ } else {
+ u8 value;
+ s32 tmp;
+ s32 driving_updated =
+ phy_cfg->driving_updated_for_dev_dis;
+ s32 dc_driving_mask = phy_cfg->dc_driving_mask;
+
+ tmp = (s32)(data & dc_driving_mask) + driving_updated;
+
+ if (tmp > dc_driving_mask)
+ tmp = dc_driving_mask;
+ else if (tmp < 0)
+ tmp = 0;
+
+ value = (data & (~dc_driving_mask)) | (tmp & dc_driving_mask);
+
+ rtk_phy_write(phy_reg, addr, value);
+ }
+
+do_toggle:
+ /* restore dc disconnect level before toggle */
+ update_dc_disconnect_level(rtk_phy, phy_parameter, false);
+
+ /* Set page 1 */
+ rtk_phy_set_page(phy_reg, 1);
+
+ addr = PAGE1_0XE0;
+ data = rtk_phy_read(phy_reg, addr);
+
+ rtk_phy_write(phy_reg, addr, data &
+ (~ENABLE_AUTO_SENSITIVITY_CALIBRATION));
+ mdelay(1);
+ rtk_phy_write(phy_reg, addr, data |
+ (ENABLE_AUTO_SENSITIVITY_CALIBRATION));
+
+ /* update dc disconnect level after toggle */
+ update_dc_disconnect_level(rtk_phy, phy_parameter, true);
+
+out:
+ return;
+}
+
+static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
+{
+ struct phy_parameter *phy_parameter;
+ struct phy_cfg *phy_cfg;
+ struct phy_data *phy_data_page;
+ struct phy_reg *phy_reg;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (phy_cfg->use_default_parameter) {
+ dev_dbg(rtk_phy->dev, "%s phy#%d use default parameter\n",
+ __func__, index);
+ goto do_toggle;
+ }
+
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ for (i = 0; i < phy_cfg->page0_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = phy_data->addr;
+ u8 data = phy_data->data;
+
+ if (!addr)
+ continue;
+
+ if (rtk_phy_write(phy_reg, addr, data)) {
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page0 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+ return -EINVAL;
+ }
+ }
+
+ /* Set page 1 */
+ phy_data_page = phy_cfg->page1;
+ rtk_phy_set_page(phy_reg, 1);
+
+ for (i = 0; i < phy_cfg->page1_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = phy_data->addr;
+ u8 data = phy_data->data;
+
+ if (!addr)
+ continue;
+
+ if (rtk_phy_write(phy_reg, addr, data)) {
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+ return -EINVAL;
+ }
+ }
+
+ if (phy_cfg->page2_size == 0)
+ goto do_toggle;
+
+ /* Set page 2 */
+ phy_data_page = phy_cfg->page2;
+ rtk_phy_set_page(phy_reg, 2);
+
+ for (i = 0; i < phy_cfg->page2_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = phy_data->addr;
+ u8 data = phy_data->data;
+
+ if (!addr)
+ continue;
+
+ if (rtk_phy_write(phy_reg, addr, data)) {
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page2 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+ return -EINVAL;
+ }
+ }
+
+do_toggle:
+ do_rtk_phy_toggle(rtk_phy, index, false);
+
+ return 0;
+}
+
+static int rtk_phy_init(struct phy *phy)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+ unsigned long phy_init_time = jiffies;
+ int i, ret = 0;
+
+ if (!rtk_phy)
+ return -EINVAL;
+
+ for (i = 0; i < rtk_phy->num_phy; i++)
+ ret = do_rtk_phy_init(rtk_phy, i);
+
+ dev_dbg(rtk_phy->dev, "Initialized RTK USB 2.0 PHY (take %dms)\n",
+ jiffies_to_msecs(jiffies - phy_init_time));
+ return ret;
+}
+
+static int rtk_phy_exit(struct phy *phy)
+{
+ return 0;
+}
+
+static void rtk_phy_toggle(struct rtk_phy *rtk_phy, bool connect, int port)
+{
+ int index = port;
+
+ if (index > rtk_phy->num_phy) {
+ dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n",
+ __func__, index, rtk_phy->num_phy);
+ return;
+ }
+
+ do_rtk_phy_toggle(rtk_phy, index, connect);
+}
+
+static int rtk_phy_connect(struct phy *phy, int port)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+
+ dev_dbg(rtk_phy->dev, "%s port=%d\n", __func__, port);
+ rtk_phy_toggle(rtk_phy, true, port);
+
+ return 0;
+}
+
+static int rtk_phy_disconnect(struct phy *phy, int port)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+
+ dev_dbg(rtk_phy->dev, "%s port=%d\n", __func__, port);
+ rtk_phy_toggle(rtk_phy, false, port);
+
+ return 0;
+}
+
+static const struct phy_ops ops = {
+ .init = rtk_phy_init,
+ .exit = rtk_phy_exit,
+ .connect = rtk_phy_connect,
+ .disconnect = rtk_phy_disconnect,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *create_phy_debug_root(void)
+{
+ struct dentry *phy_debug_root;
+
+ phy_debug_root = debugfs_lookup("phy", usb_debug_root);
+ if (!phy_debug_root)
+ phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
+
+ return phy_debug_root;
+}
+
+static int rtk_usb2_parameter_show(struct seq_file *s, void *unused)
+{
+ struct rtk_phy *rtk_phy = s->private;
+ struct phy_cfg *phy_cfg;
+ int i, index;
+
+ phy_cfg = rtk_phy->phy_cfg;
+
+ seq_puts(s, "Property:\n");
+ seq_printf(s, " check_efuse: %s\n",
+ phy_cfg->check_efuse ? "Enable" : "Disable");
+ seq_printf(s, " check_efuse_version: %d\n",
+ phy_cfg->check_efuse_version);
+ seq_printf(s, " efuse_dc_driving_rate: %d\n",
+ phy_cfg->efuse_dc_driving_rate);
+ seq_printf(s, " dc_driving_mask: 0x%x\n",
+ phy_cfg->dc_driving_mask);
+ seq_printf(s, " efuse_dc_disconnect_rate: %d\n",
+ phy_cfg->efuse_dc_disconnect_rate);
+ seq_printf(s, " dc_disconnect_mask: 0x%x\n",
+ phy_cfg->dc_disconnect_mask);
+ seq_printf(s, " usb_dc_disconnect_at_page0: %s\n",
+ phy_cfg->usb_dc_disconnect_at_page0 ? "true" : "false");
+ seq_printf(s, " do_toggle: %s\n",
+ phy_cfg->do_toggle ? "Enable" : "Disable");
+ seq_printf(s, " do_toggle_driving: %s\n",
+ phy_cfg->do_toggle_driving ? "Enable" : "Disable");
+ seq_printf(s, " driving_updated_for_dev_dis: 0x%x\n",
+ phy_cfg->driving_updated_for_dev_dis);
+ seq_printf(s, " use_default_parameter: %s\n",
+ phy_cfg->use_default_parameter ? "Enable" : "Disable");
+ seq_printf(s, " is_double_sensitivity_mode: %s\n",
+ phy_cfg->is_double_sensitivity_mode ? "Enable" : "Disable");
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ struct phy_parameter *phy_parameter;
+ struct phy_reg *phy_reg;
+ struct phy_data *phy_data_page;
+
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ seq_printf(s, "PHY %d:\n", index);
+
+ seq_puts(s, "Page 0:\n");
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ for (i = 0; i < phy_cfg->page0_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = array_index_to_page_addr(i);
+ u8 data = phy_data->data;
+ u8 value = rtk_phy_read(phy_reg, addr);
+
+ if (phy_data->addr)
+ seq_printf(s, " Page 0: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+ addr, data, value);
+ else
+ seq_printf(s, " Page 0: addr=0x%x data=none ==> read value=0x%02x\n",
+ addr, value);
+ }
+
+ seq_puts(s, "Page 1:\n");
+ /* Set page 1 */
+ phy_data_page = phy_cfg->page1;
+ rtk_phy_set_page(phy_reg, 1);
+
+ for (i = 0; i < phy_cfg->page1_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = array_index_to_page_addr(i);
+ u8 data = phy_data->data;
+ u8 value = rtk_phy_read(phy_reg, addr);
+
+ if (phy_data->addr)
+ seq_printf(s, " Page 1: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+ addr, data, value);
+ else
+ seq_printf(s, " Page 1: addr=0x%x data=none ==> read value=0x%02x\n",
+ addr, value);
+ }
+
+ if (phy_cfg->page2_size == 0)
+ goto out;
+
+ seq_puts(s, "Page 2:\n");
+ /* Set page 2 */
+ phy_data_page = phy_cfg->page2;
+ rtk_phy_set_page(phy_reg, 2);
+
+ for (i = 0; i < phy_cfg->page2_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = array_index_to_page_addr(i);
+ u8 data = phy_data->data;
+ u8 value = rtk_phy_read(phy_reg, addr);
+
+ if (phy_data->addr)
+ seq_printf(s, " Page 2: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+ addr, data, value);
+ else
+ seq_printf(s, " Page 2: addr=0x%x data=none ==> read value=0x%02x\n",
+ addr, value);
+ }
+
+out:
+ seq_puts(s, "PHY Property:\n");
+ seq_printf(s, " efuse_usb_dc_cal: %d\n",
+ (int)phy_parameter->efuse_usb_dc_cal);
+ seq_printf(s, " efuse_usb_dc_dis: %d\n",
+ (int)phy_parameter->efuse_usb_dc_dis);
+ seq_printf(s, " inverse_hstx_sync_clock: %s\n",
+ phy_parameter->inverse_hstx_sync_clock ? "Enable" : "Disable");
+ seq_printf(s, " driving_level: %d\n",
+ phy_parameter->driving_level);
+ seq_printf(s, " driving_level_compensate: %d\n",
+ phy_parameter->driving_level_compensate);
+ seq_printf(s, " disconnection_compensate: %d\n",
+ phy_parameter->disconnection_compensate);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(rtk_usb2_parameter);
+
+static inline void create_debug_files(struct rtk_phy *rtk_phy)
+{
+ struct dentry *phy_debug_root = NULL;
+
+ phy_debug_root = create_phy_debug_root();
+ if (!phy_debug_root)
+ return;
+
+ rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev),
+ phy_debug_root);
+
+ debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+ &rtk_usb2_parameter_fops);
+}
+
+static inline void remove_debug_files(struct rtk_phy *rtk_phy)
+{
+ debugfs_remove_recursive(rtk_phy->debug_dir);
+}
+#else
+static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
+static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, int index)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+ u8 value = 0;
+ struct nvmem_cell *cell;
+ struct soc_device_attribute rtk_soc_groot[] = {
+ { .family = "Realtek Groot",},
+ { /* empty */ } };
+
+ if (!phy_cfg->check_efuse)
+ goto out;
+
+ /* Read efuse for usb dc cal */
+ cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-cal");
+ if (IS_ERR(cell)) {
+ dev_dbg(rtk_phy->dev, "%s no usb-dc-cal: %ld\n",
+ __func__, PTR_ERR(cell));
+ } else {
+ unsigned char *buf;
+ size_t buf_size;
+
+ buf = nvmem_cell_read(cell, &buf_size);
+ if (!IS_ERR(buf)) {
+ value = buf[0] & phy_cfg->dc_driving_mask;
+ kfree(buf);
+ }
+ nvmem_cell_put(cell);
+ }
+
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ int rate = phy_cfg->efuse_dc_driving_rate;
+
+ if (value <= EFUS_USB_DC_CAL_MAX)
+ phy_parameter->efuse_usb_dc_cal = (int8_t)(value * rate);
+ else
+ phy_parameter->efuse_usb_dc_cal = -(int8_t)
+ ((EFUS_USB_DC_CAL_MAX & value) * rate);
+
+ if (soc_device_match(rtk_soc_groot)) {
+ dev_dbg(rtk_phy->dev, "For groot IC we need a workaround to adjust efuse_usb_dc_cal\n");
+
+ /* We don't multiply by dc_cal_rate=2 for positive dc cal compensation */
+ if (value <= EFUS_USB_DC_CAL_MAX)
+ phy_parameter->efuse_usb_dc_cal = (int8_t)(value);
+
+ /* We set the max dc cal compensation to 0x8 if the otp value is 0x7 */
+ if (value == 0x7)
+ phy_parameter->efuse_usb_dc_cal = (int8_t)(value + 1);
+ }
+ } else { /* for CHECK_EFUSE_V2 */
+ phy_parameter->efuse_usb_dc_cal = value & phy_cfg->dc_driving_mask;
+ }
+
+ /* Read efuse for usb dc disconnect level */
+ value = 0;
+ cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-dis");
+ if (IS_ERR(cell)) {
+ dev_dbg(rtk_phy->dev, "%s no usb-dc-dis: %ld\n",
+ __func__, PTR_ERR(cell));
+ } else {
+ unsigned char *buf;
+ size_t buf_size;
+
+ buf = nvmem_cell_read(cell, &buf_size);
+ if (!IS_ERR(buf)) {
+ value = buf[0] & phy_cfg->dc_disconnect_mask;
+ kfree(buf);
+ }
+ nvmem_cell_put(cell);
+ }
+
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ int rate = phy_cfg->efuse_dc_disconnect_rate;
+
+ if (value <= EFUS_USB_DC_DIS_MAX)
+ phy_parameter->efuse_usb_dc_dis = (int8_t)(value * rate);
+ else
+ phy_parameter->efuse_usb_dc_dis = -(int8_t)
+ ((EFUS_USB_DC_DIS_MAX & value) * rate);
+ } else { /* for CHECK_EFUSE_V2 */
+ phy_parameter->efuse_usb_dc_dis = value & phy_cfg->dc_disconnect_mask;
+ }
+
+out:
+ return 0;
+}
+
+static int parse_phy_data(struct rtk_phy *rtk_phy)
+{
+ struct device *dev = rtk_phy->dev;
+ struct device_node *np = dev->of_node;
+ struct phy_parameter *phy_parameter;
+ int ret = 0;
+ int index;
+
+ rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
+ rtk_phy->num_phy, GFP_KERNEL);
+ if (!rtk_phy->phy_parameter)
+ return -ENOMEM;
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+
+ phy_parameter->phy_reg.reg_wrap_vstatus = of_iomap(np, 0);
+ phy_parameter->phy_reg.reg_gusb2phyacc0 = of_iomap(np, 1) + index;
+ phy_parameter->phy_reg.vstatus_index = index;
+
+ if (of_property_read_bool(np, "realtek,inverse-hstx-sync-clock"))
+ phy_parameter->inverse_hstx_sync_clock = true;
+ else
+ phy_parameter->inverse_hstx_sync_clock = false;
+
+ if (of_property_read_u32_index(np, "realtek,driving-level",
+ index, &phy_parameter->driving_level))
+ phy_parameter->driving_level = DEFAULT_DC_DRIVING_VALUE;
+
+ if (of_property_read_u32_index(np, "realtek,driving-level-compensate",
+ index, &phy_parameter->driving_level_compensate))
+ phy_parameter->driving_level_compensate = 0;
+
+ if (of_property_read_u32_index(np, "realtek,disconnection-compensate",
+ index, &phy_parameter->disconnection_compensate))
+ phy_parameter->disconnection_compensate = 0;
+
+ get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
+
+ update_dc_driving_level(rtk_phy, phy_parameter);
+
+ update_hs_clk_select(rtk_phy, phy_parameter);
+ }
+
+ return ret;
+}
+
+static int rtk_usb2phy_probe(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy;
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ const struct phy_cfg *phy_cfg;
+ int ret = 0;
+
+ phy_cfg = of_device_get_match_data(dev);
+ if (!phy_cfg) {
+ dev_err(dev, "phy config are not assigned!\n");
+ return -EINVAL;
+ }
+
+ rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
+ if (!rtk_phy)
+ return -ENOMEM;
+
+ rtk_phy->dev = &pdev->dev;
+ rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
+ if (!rtk_phy->phy_cfg)
+ return -ENOMEM;
+
+ memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
+
+ rtk_phy->num_phy = phy_cfg->num_phy;
+
+ ret = parse_phy_data(rtk_phy);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, rtk_phy);
+
+ generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, rtk_phy);
+
+ phy_provider = devm_of_phy_provider_register(rtk_phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ create_debug_files(rtk_phy);
+
+err:
+ return ret;
+}
+
+static void rtk_usb2phy_remove(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
+
+ remove_debug_files(rtk_phy);
+}
+
+static const struct phy_cfg rtd1295_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0x90},
+ [3] = {0xe3, 0x3a},
+ [4] = {0xe4, 0x68},
+ [6] = {0xe6, 0x91},
+ [13] = {0xf5, 0x81},
+ [15] = {0xf7, 0x02}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 1,
+ .check_efuse = false,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1395_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [4] = {0xe4, 0xac},
+ [13] = {0xf5, 0x00},
+ [15] = {0xf7, 0x02}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 1,
+ .check_efuse = false,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1395_phy_cfg_2port = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [4] = {0xe4, 0xac},
+ [13] = {0xf5, 0x00},
+ [15] = {0xf7, 0x02}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 2,
+ .check_efuse = false,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1619_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [4] = {0xe4, 0x68}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1319_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0x18},
+ [4] = {0xe4, 0x6a},
+ [7] = {0xe7, 0x71},
+ [13] = {0xf5, 0x15},
+ [15] = {0xf7, 0x32}, },
+ .page1_size = 8,
+ .page1 = { [3] = {0xe3, 0x44}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [0] = {0xe0, 0x01}, },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = true,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1312c_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0x14},
+ [4] = {0xe4, 0x67},
+ [5] = {0xe5, 0x55}, },
+ .page1_size = 8,
+ .page1 = { [3] = {0xe3, 0x23},
+ [6] = {0xe6, 0x58}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { /* default parameter */ },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = true,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1619b_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0xa3},
+ [4] = {0xe4, 0xa8},
+ [5] = {0xe5, 0x4f},
+ [6] = {0xe6, 0x02}, },
+ .page1_size = 8,
+ .page1 = { [3] = {0xe3, 0x64}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [7] = {0xe7, 0x45}, },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+ .dc_driving_mask = 0x1f,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = false,
+ .do_toggle = true,
+ .do_toggle_driving = true,
+ .driving_updated_for_dev_dis = 0x8,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1319d_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0xa3},
+ [4] = {0xe4, 0x8e},
+ [5] = {0xe5, 0x4f},
+ [6] = {0xe6, 0x02}, },
+ .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
+ .page1 = { [14] = {0xf5, 0x1}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [7] = {0xe7, 0x44}, },
+ .check_efuse = true,
+ .num_phy = 1,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+ .dc_driving_mask = 0x1f,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = false,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0x8,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1315e_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0xa3},
+ [4] = {0xe4, 0x8c},
+ [5] = {0xe5, 0x4f},
+ [6] = {0xe6, 0x02}, },
+ .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
+ .page1 = { [3] = {0xe3, 0x7f},
+ [14] = {0xf5, 0x01}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [7] = {0xe7, 0x44}, },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V2,
+ .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+ .dc_driving_mask = 0x1f,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = false,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0x8,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct of_device_id usbphy_rtk_dt_match[] = {
+ { .compatible = "realtek,rtd1295-usb2phy", .data = &rtd1295_phy_cfg },
+ { .compatible = "realtek,rtd1312c-usb2phy", .data = &rtd1312c_phy_cfg },
+ { .compatible = "realtek,rtd1315e-usb2phy", .data = &rtd1315e_phy_cfg },
+ { .compatible = "realtek,rtd1319-usb2phy", .data = &rtd1319_phy_cfg },
+ { .compatible = "realtek,rtd1319d-usb2phy", .data = &rtd1319d_phy_cfg },
+ { .compatible = "realtek,rtd1395-usb2phy", .data = &rtd1395_phy_cfg },
+ { .compatible = "realtek,rtd1395-usb2phy-2port", .data = &rtd1395_phy_cfg_2port },
+ { .compatible = "realtek,rtd1619-usb2phy", .data = &rtd1619_phy_cfg },
+ { .compatible = "realtek,rtd1619b-usb2phy", .data = &rtd1619b_phy_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
+
+static struct platform_driver rtk_usb2phy_driver = {
+ .probe = rtk_usb2phy_probe,
+ .remove_new = rtk_usb2phy_remove,
+ .driver = {
+ .name = "rtk-usb2phy",
+ .of_match_table = usbphy_rtk_dt_match,
+ },
+};
+
+module_platform_driver(rtk_usb2phy_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
+MODULE_DESCRIPTION("Realtek usb 2.0 phy driver");
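
Before the USB 3.0 counterpart below, here is a small standalone sketch of the CHECK_EFUSE_V1 trim decoding used by get_phy_data_by_efuse() in the driver above: raw efuse values at or below the maximum are taken as a positive trim, larger values contribute a negative trim derived from their low bits, and both are scaled by the per-SoC rate. The decode_dc_cal_v1() helper and main() are illustrative only, not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define EFUS_USB_DC_CAL_MAX	7

/*
 * Mirrors the CHECK_EFUSE_V1 branch of get_phy_data_by_efuse(): values up to
 * EFUS_USB_DC_CAL_MAX are a positive trim, larger raw values yield a negative
 * trim built from their low bits.
 */
static int8_t decode_dc_cal_v1(uint8_t raw, int rate)
{
	if (raw <= EFUS_USB_DC_CAL_MAX)
		return (int8_t)(raw * rate);

	return -(int8_t)((raw & EFUS_USB_DC_CAL_MAX) * rate);
}

int main(void)
{
	/* e.g. raw 0x3 at rate 2 -> +6, raw 0xb at rate 2 -> -6 */
	printf("%d %d\n", decode_dc_cal_v1(0x3, 2), decode_dc_cal_v1(0xb, 2));
	return 0;
}
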
diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c
new file mode 100644
index 000000000000..dfcf4b921bba
--- /dev/null
+++ b/drivers/phy/realtek/phy-rtk-usb3.c
@@ -0,0 +1,748 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-rtk-usb3.c RTK usb3.0 phy driver
+ *
+ * Copyright (C) 2023 Realtek Semiconductor Corporation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/regmap.h>
+#include <linux/sys_soc.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+#include <linux/usb.h>
+
+#define USB_MDIO_CTRL_PHY_BUSY BIT(7)
+#define USB_MDIO_CTRL_PHY_WRITE BIT(0)
+#define USB_MDIO_CTRL_PHY_ADDR_SHIFT 8
+#define USB_MDIO_CTRL_PHY_DATA_SHIFT 16
+
+#define MAX_USB_PHY_DATA_SIZE 0x30
+#define PHY_ADDR_0X09 0x09
+#define PHY_ADDR_0X0B 0x0b
+#define PHY_ADDR_0X0D 0x0d
+#define PHY_ADDR_0X10 0x10
+#define PHY_ADDR_0X1F 0x1f
+#define PHY_ADDR_0X20 0x20
+#define PHY_ADDR_0X21 0x21
+#define PHY_ADDR_0X30 0x30
+
+#define REG_0X09_FORCE_CALIBRATION BIT(9)
+#define REG_0X0B_RX_OFFSET_RANGE_MASK 0xc
+#define REG_0X0D_RX_DEBUG_TEST_EN BIT(6)
+#define REG_0X10_DEBUG_MODE_SETTING 0x3c0
+#define REG_0X10_DEBUG_MODE_SETTING_MASK 0x3f8
+#define REG_0X1F_RX_OFFSET_CODE_MASK 0x1e
+
+#define USB_U3_TX_LFPS_SWING_TRIM_SHIFT 4
+#define USB_U3_TX_LFPS_SWING_TRIM_MASK 0xf
+#define AMPLITUDE_CONTROL_COARSE_MASK 0xff
+#define AMPLITUDE_CONTROL_FINE_MASK 0xffff
+#define AMPLITUDE_CONTROL_COARSE_DEFAULT 0xff
+#define AMPLITUDE_CONTROL_FINE_DEFAULT 0xffff
+
+#define PHY_ADDR_MAP_ARRAY_INDEX(addr) (addr)
+#define ARRAY_INDEX_MAP_PHY_ADDR(index) (index)
+
+struct phy_reg {
+ void __iomem *reg_mdio_ctl;
+};
+
+struct phy_data {
+ u8 addr;
+ u16 data;
+};
+
+struct phy_cfg {
+ int param_size;
+ struct phy_data param[MAX_USB_PHY_DATA_SIZE];
+
+ bool check_efuse;
+ bool do_toggle;
+ bool do_toggle_once;
+ bool use_default_parameter;
+ bool check_rx_front_end_offset;
+};
+
+struct phy_parameter {
+ struct phy_reg phy_reg;
+
+ /* Get from efuse */
+ u8 efuse_usb_u3_tx_lfps_swing_trim;
+
+ /* Get from dts */
+ u32 amplitude_control_coarse;
+ u32 amplitude_control_fine;
+};
+
+struct rtk_phy {
+ struct device *dev;
+
+ struct phy_cfg *phy_cfg;
+ int num_phy;
+ struct phy_parameter *phy_parameter;
+
+ struct dentry *debug_dir;
+};
+
+#define PHY_IO_TIMEOUT_USEC (50000)
+#define PHY_IO_DELAY_US (100)
+
+static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
+{
+ int ret;
+ unsigned int val;
+
+ ret = read_poll_timeout(readl, val, ((val & mask) == result),
+ PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
+ if (ret) {
+ pr_err("%s can't program USB phy\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int rtk_phy3_wait_vbusy(struct phy_reg *phy_reg)
+{
+ return utmi_wait_register(phy_reg->reg_mdio_ctl, USB_MDIO_CTRL_PHY_BUSY, 0);
+}
+
+static u16 rtk_phy_read(struct phy_reg *phy_reg, char addr)
+{
+ unsigned int tmp;
+ u32 value;
+
+ tmp = (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT);
+
+ writel(tmp, phy_reg->reg_mdio_ctl);
+
+ rtk_phy3_wait_vbusy(phy_reg);
+
+ value = readl(phy_reg->reg_mdio_ctl);
+ value = value >> USB_MDIO_CTRL_PHY_DATA_SHIFT;
+
+ return (u16)value;
+}
+
+static int rtk_phy_write(struct phy_reg *phy_reg, char addr, u16 data)
+{
+ unsigned int val;
+
+ val = USB_MDIO_CTRL_PHY_WRITE |
+ (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT) |
+ (data << USB_MDIO_CTRL_PHY_DATA_SHIFT);
+
+ writel(val, phy_reg->reg_mdio_ctl);
+
+ rtk_phy3_wait_vbusy(phy_reg);
+
+ return 0;
+}
+
+static void do_rtk_usb3_phy_toggle(struct rtk_phy *rtk_phy, int index, bool connect)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_parameter *phy_parameter;
+ struct phy_data *phy_data;
+ u8 addr;
+ u16 data;
+ int i;
+
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (!phy_cfg->do_toggle)
+ return;
+
+ i = PHY_ADDR_MAP_ARRAY_INDEX(PHY_ADDR_0X09);
+ phy_data = phy_cfg->param + i;
+ addr = phy_data->addr;
+ data = phy_data->data;
+
+ if (!addr && !data) {
+ addr = PHY_ADDR_0X09;
+ data = rtk_phy_read(phy_reg, addr);
+ phy_data->addr = addr;
+ phy_data->data = data;
+ }
+
+ rtk_phy_write(phy_reg, addr, data & (~REG_0X09_FORCE_CALIBRATION));
+ mdelay(1);
+ rtk_phy_write(phy_reg, addr, data | REG_0X09_FORCE_CALIBRATION);
+}
+
+static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_parameter *phy_parameter;
+ int i = 0;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (phy_cfg->use_default_parameter)
+ goto do_toggle;
+
+ for (i = 0; i < phy_cfg->param_size; i++) {
+ struct phy_data *phy_data = phy_cfg->param + i;
+ u8 addr = phy_data->addr;
+ u16 data = phy_data->data;
+
+ if (!addr && !data)
+ continue;
+
+ rtk_phy_write(phy_reg, addr, data);
+ }
+
+do_toggle:
+ if (phy_cfg->do_toggle_once)
+ phy_cfg->do_toggle = true;
+
+ do_rtk_usb3_phy_toggle(rtk_phy, index, false);
+
+ if (phy_cfg->do_toggle_once) {
+ u16 check_value = 0;
+ int count = 10;
+ u16 value_0x0d, value_0x10;
+
+ /* Enable Debug mode by setting 0x0D and 0x10 */
+ value_0x0d = rtk_phy_read(phy_reg, PHY_ADDR_0X0D);
+ value_0x10 = rtk_phy_read(phy_reg, PHY_ADDR_0X10);
+
+ rtk_phy_write(phy_reg, PHY_ADDR_0X0D,
+ value_0x0d | REG_0X0D_RX_DEBUG_TEST_EN);
+ rtk_phy_write(phy_reg, PHY_ADDR_0X10,
+ (value_0x10 & ~REG_0X10_DEBUG_MODE_SETTING_MASK) |
+ REG_0X10_DEBUG_MODE_SETTING);
+
+ check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
+
+ while (!(check_value & BIT(15))) {
+ check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
+ mdelay(1);
+ if (count-- < 0)
+ break;
+ }
+
+ if (!(check_value & BIT(15)))
+ dev_info(rtk_phy->dev, "toggle fail addr=0x%02x, data=0x%04x\n",
+ PHY_ADDR_0X30, check_value);
+
+ /* Disable Debug mode by setting 0x0D and 0x10 back to default */
+ rtk_phy_write(phy_reg, PHY_ADDR_0X0D, value_0x0d);
+ rtk_phy_write(phy_reg, PHY_ADDR_0X10, value_0x10);
+
+ phy_cfg->do_toggle = false;
+ }
+
+ if (phy_cfg->check_rx_front_end_offset) {
+ u16 rx_offset_code, rx_offset_range;
+ u16 code_mask = REG_0X1F_RX_OFFSET_CODE_MASK;
+ u16 range_mask = REG_0X0B_RX_OFFSET_RANGE_MASK;
+ bool do_update = false;
+
+ rx_offset_code = rtk_phy_read(phy_reg, PHY_ADDR_0X1F);
+ if (((rx_offset_code & code_mask) == 0x0) ||
+ ((rx_offset_code & code_mask) == code_mask))
+ do_update = true;
+
+ rx_offset_range = rtk_phy_read(phy_reg, PHY_ADDR_0X0B);
+ if (((rx_offset_range & range_mask) == range_mask) && do_update) {
+ dev_warn(rtk_phy->dev, "Don't update rx_offset_range (rx_offset_code=0x%x, rx_offset_range=0x%x)\n",
+ rx_offset_code, rx_offset_range);
+ do_update = false;
+ }
+
+ if (do_update) {
+ u16 tmp1, tmp2;
+
+ tmp1 = rx_offset_range & (~range_mask);
+ tmp2 = rx_offset_range & range_mask;
+ tmp2 += (1 << 2);
+ rx_offset_range = tmp1 | (tmp2 & range_mask);
+ rtk_phy_write(phy_reg, PHY_ADDR_0X0B, rx_offset_range);
+ goto do_toggle;
+ }
+ }
+
+ return 0;
+}
+
+static int rtk_phy_init(struct phy *phy)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+ int ret = 0;
+ int i;
+ unsigned long phy_init_time = jiffies;
+
+ for (i = 0; i < rtk_phy->num_phy; i++)
+ ret = do_rtk_phy_init(rtk_phy, i);
+
+ dev_dbg(rtk_phy->dev, "Initialized RTK USB 3.0 PHY (take %dms)\n",
+ jiffies_to_msecs(jiffies - phy_init_time));
+
+ return ret;
+}
+
+static int rtk_phy_exit(struct phy *phy)
+{
+ return 0;
+}
+
+static void rtk_phy_toggle(struct rtk_phy *rtk_phy, bool connect, int port)
+{
+ int index = port;
+
+ if (index > rtk_phy->num_phy) {
+ dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n",
+ __func__, index, rtk_phy->num_phy);
+ return;
+ }
+
+ do_rtk_usb3_phy_toggle(rtk_phy, index, connect);
+}
+
+static int rtk_phy_connect(struct phy *phy, int port)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+
+ dev_dbg(rtk_phy->dev, "%s port=%d\n", __func__, port);
+ rtk_phy_toggle(rtk_phy, true, port);
+
+ return 0;
+}
+
+static int rtk_phy_disconnect(struct phy *phy, int port)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+
+ dev_dbg(rtk_phy->dev, "%s port=%d\n", __func__, port);
+ rtk_phy_toggle(rtk_phy, false, port);
+
+ return 0;
+}
+
+static const struct phy_ops ops = {
+ .init = rtk_phy_init,
+ .exit = rtk_phy_exit,
+ .connect = rtk_phy_connect,
+ .disconnect = rtk_phy_disconnect,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *create_phy_debug_root(void)
+{
+ struct dentry *phy_debug_root;
+
+ phy_debug_root = debugfs_lookup("phy", usb_debug_root);
+ if (!phy_debug_root)
+ phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
+
+ return phy_debug_root;
+}
+
+static int rtk_usb3_parameter_show(struct seq_file *s, void *unused)
+{
+ struct rtk_phy *rtk_phy = s->private;
+ struct phy_cfg *phy_cfg;
+ int i, index;
+
+ phy_cfg = rtk_phy->phy_cfg;
+
+ seq_puts(s, "Property:\n");
+ seq_printf(s, " check_efuse: %s\n",
+ phy_cfg->check_efuse ? "Enable" : "Disable");
+ seq_printf(s, " do_toggle: %s\n",
+ phy_cfg->do_toggle ? "Enable" : "Disable");
+ seq_printf(s, " do_toggle_once: %s\n",
+ phy_cfg->do_toggle_once ? "Enable" : "Disable");
+ seq_printf(s, " use_default_parameter: %s\n",
+ phy_cfg->use_default_parameter ? "Enable" : "Disable");
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ struct phy_reg *phy_reg;
+ struct phy_parameter *phy_parameter;
+
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ seq_printf(s, "PHY %d:\n", index);
+
+ for (i = 0; i < phy_cfg->param_size; i++) {
+ struct phy_data *phy_data = phy_cfg->param + i;
+ u8 addr = ARRAY_INDEX_MAP_PHY_ADDR(i);
+ u16 data = phy_data->data;
+
+ if (!phy_data->addr && !data)
+ seq_printf(s, " addr = 0x%02x, data = none ==> read value = 0x%04x\n",
+ addr, rtk_phy_read(phy_reg, addr));
+ else
+ seq_printf(s, " addr = 0x%02x, data = 0x%04x ==> read value = 0x%04x\n",
+ addr, data, rtk_phy_read(phy_reg, addr));
+ }
+
+ seq_puts(s, "PHY Property:\n");
+ seq_printf(s, " efuse_usb_u3_tx_lfps_swing_trim: 0x%x\n",
+ (int)phy_parameter->efuse_usb_u3_tx_lfps_swing_trim);
+ seq_printf(s, " amplitude_control_coarse: 0x%x\n",
+ (int)phy_parameter->amplitude_control_coarse);
+ seq_printf(s, " amplitude_control_fine: 0x%x\n",
+ (int)phy_parameter->amplitude_control_fine);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(rtk_usb3_parameter);
+
+static inline void create_debug_files(struct rtk_phy *rtk_phy)
+{
+ struct dentry *phy_debug_root = NULL;
+
+ phy_debug_root = create_phy_debug_root();
+
+ if (!phy_debug_root)
+ return;
+
+ rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), phy_debug_root);
+
+ debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+ &rtk_usb3_parameter_fops);
+}
+
+static inline void remove_debug_files(struct rtk_phy *rtk_phy)
+{
+ debugfs_remove_recursive(rtk_phy->debug_dir);
+}
+#else
+static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
+static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, int index)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+ u8 value = 0;
+ struct nvmem_cell *cell;
+
+ if (!phy_cfg->check_efuse)
+ goto out;
+
+ cell = nvmem_cell_get(rtk_phy->dev, "usb_u3_tx_lfps_swing_trim");
+ if (IS_ERR(cell)) {
+ dev_dbg(rtk_phy->dev, "%s no usb_u3_tx_lfps_swing_trim: %ld\n",
+ __func__, PTR_ERR(cell));
+ } else {
+ unsigned char *buf;
+ size_t buf_size;
+
+ buf = nvmem_cell_read(cell, &buf_size);
+ if (!IS_ERR(buf)) {
+ value = buf[0] & USB_U3_TX_LFPS_SWING_TRIM_MASK;
+ kfree(buf);
+ }
+ nvmem_cell_put(cell);
+ }
+
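+ /* Fused trim values 0x1..0x7 are considered too low and are bumped to
+  * 0x8; 0x0 (no fuse data) and values >= 0x8 are used as-is. */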
+ if (value > 0 && value < 0x8)
+ phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = 0x8;
+ else
+ phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = (u8)value;
+
+out:
+ return 0;
+}
+
+static void update_amplitude_control_value(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+
+ phy_reg = &phy_parameter->phy_reg;
+ phy_cfg = rtk_phy->phy_cfg;
+
+ if (phy_parameter->amplitude_control_coarse != AMPLITUDE_CONTROL_COARSE_DEFAULT) {
+ u16 val_mask = AMPLITUDE_CONTROL_COARSE_MASK;
+ u16 data;
+
+ if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
+ phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
+ data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
+ } else {
+ data = phy_cfg->param[PHY_ADDR_0X20].data;
+ }
+
+ data &= (~val_mask);
+ data |= (phy_parameter->amplitude_control_coarse & val_mask);
+
+ phy_cfg->param[PHY_ADDR_0X20].data = data;
+ }
+
+ if (phy_parameter->efuse_usb_u3_tx_lfps_swing_trim) {
+ u8 efuse_val = phy_parameter->efuse_usb_u3_tx_lfps_swing_trim;
+ u16 val_mask = USB_U3_TX_LFPS_SWING_TRIM_MASK;
+ int val_shift = USB_U3_TX_LFPS_SWING_TRIM_SHIFT;
+ u16 data;
+
+ if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
+ phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
+ data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
+ } else {
+ data = phy_cfg->param[PHY_ADDR_0X20].data;
+ }
+
+ data &= ~(val_mask << val_shift);
+ data |= ((efuse_val & val_mask) << val_shift);
+
+ phy_cfg->param[PHY_ADDR_0X20].data = data;
+ }
+
+ if (phy_parameter->amplitude_control_fine != AMPLITUDE_CONTROL_FINE_DEFAULT) {
+ u16 val_mask = AMPLITUDE_CONTROL_FINE_MASK;
+
+ if (!phy_cfg->param[PHY_ADDR_0X21].addr && !phy_cfg->param[PHY_ADDR_0X21].data)
+ phy_cfg->param[PHY_ADDR_0X21].addr = PHY_ADDR_0X21;
+
+ phy_cfg->param[PHY_ADDR_0X21].data =
+ phy_parameter->amplitude_control_fine & val_mask;
+ }
+}
+
+static int parse_phy_data(struct rtk_phy *rtk_phy)
+{
+ struct device *dev = rtk_phy->dev;
+ struct phy_parameter *phy_parameter;
+ int ret = 0;
+ int index;
+
+ rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
+ rtk_phy->num_phy, GFP_KERNEL);
+ if (!rtk_phy->phy_parameter)
+ return -ENOMEM;
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+
+ phy_parameter->phy_reg.reg_mdio_ctl = of_iomap(dev->of_node, 0) + index;
+
+ /* Amplitude control address 0x20 bit 0 to bit 7 */
+ if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-coarse-tuning",
+ &phy_parameter->amplitude_control_coarse))
+ phy_parameter->amplitude_control_coarse = AMPLITUDE_CONTROL_COARSE_DEFAULT;
+
+ /* Amplitude control address 0x21 bit 0 to bit 15 */
+ if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-fine-tuning",
+ &phy_parameter->amplitude_control_fine))
+ phy_parameter->amplitude_control_fine = AMPLITUDE_CONTROL_FINE_DEFAULT;
+
+ get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
+
+ update_amplitude_control_value(rtk_phy, phy_parameter);
+ }
+
+ return ret;
+}
+
+static int rtk_usb3phy_probe(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy;
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ const struct phy_cfg *phy_cfg;
+ int ret;
+
+ phy_cfg = of_device_get_match_data(dev);
+ if (!phy_cfg) {
+ dev_err(dev, "phy config are not assigned!\n");
+ return -EINVAL;
+ }
+
+ rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
+ if (!rtk_phy)
+ return -ENOMEM;
+
+ rtk_phy->dev = &pdev->dev;
+ rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
+ if (!rtk_phy->phy_cfg)
+ return -ENOMEM;
+
+ memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
+
+ rtk_phy->num_phy = 1;
+
+ ret = parse_phy_data(rtk_phy);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, rtk_phy);
+
+ generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, rtk_phy);
+
+ phy_provider = devm_of_phy_provider_register(rtk_phy->dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ create_debug_files(rtk_phy);
+
+err:
+ return ret;
+}
+
+static void rtk_usb3phy_remove(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
+
+ remove_debug_files(rtk_phy);
+}
+
+static const struct phy_cfg rtd1295_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [0] = {0x01, 0x4008}, [1] = {0x01, 0xe046},
+ [2] = {0x02, 0x6046}, [3] = {0x03, 0x2779},
+ [4] = {0x04, 0x72f5}, [5] = {0x05, 0x2ad3},
+ [6] = {0x06, 0x000e}, [7] = {0x07, 0x2e00},
+ [8] = {0x08, 0x3591}, [9] = {0x09, 0x525c},
+ [10] = {0x0a, 0xa600}, [11] = {0x0b, 0xa904},
+ [12] = {0x0c, 0xc000}, [13] = {0x0d, 0xef1c},
+ [14] = {0x0e, 0x2000}, [15] = {0x0f, 0x0000},
+ [16] = {0x10, 0x000c}, [17] = {0x11, 0x4c00},
+ [18] = {0x12, 0xfc00}, [19] = {0x13, 0x0c81},
+ [20] = {0x14, 0xde01}, [21] = {0x15, 0x0000},
+ [22] = {0x16, 0x0000}, [23] = {0x17, 0x0000},
+ [24] = {0x18, 0x0000}, [25] = {0x19, 0x4004},
+ [26] = {0x1a, 0x1260}, [27] = {0x1b, 0xff00},
+ [28] = {0x1c, 0xcb00}, [29] = {0x1d, 0xa03f},
+ [30] = {0x1e, 0xc2e0}, [31] = {0x1f, 0x2807},
+ [32] = {0x20, 0x947a}, [33] = {0x21, 0x88aa},
+ [34] = {0x22, 0x0057}, [35] = {0x23, 0xab66},
+ [36] = {0x24, 0x0800}, [37] = {0x25, 0x0000},
+ [38] = {0x26, 0x040a}, [39] = {0x27, 0x01d6},
+ [40] = {0x28, 0xf8c2}, [41] = {0x29, 0x3080},
+ [42] = {0x2a, 0x3082}, [43] = {0x2b, 0x2078},
+ [44] = {0x2c, 0xffff}, [45] = {0x2d, 0xffff},
+ [46] = {0x2e, 0x0000}, [47] = {0x2f, 0x0040}, },
+ .check_efuse = false,
+ .do_toggle = true,
+ .do_toggle_once = false,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1619_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [8] = {0x08, 0x3591},
+ [38] = {0x26, 0x840b},
+ [40] = {0x28, 0xf842}, },
+ .check_efuse = false,
+ .do_toggle = true,
+ .do_toggle_once = false,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1319_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [1] = {0x01, 0xac86},
+ [6] = {0x06, 0x0003},
+ [9] = {0x09, 0x924c},
+ [10] = {0x0a, 0xa608},
+ [11] = {0x0b, 0xb905},
+ [14] = {0x0e, 0x2010},
+ [32] = {0x20, 0x705a},
+ [33] = {0x21, 0xf645},
+ [34] = {0x22, 0x0013},
+ [35] = {0x23, 0xcb66},
+ [41] = {0x29, 0xff00}, },
+ .check_efuse = true,
+ .do_toggle = true,
+ .do_toggle_once = false,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1619b_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [1] = {0x01, 0xac8c},
+ [6] = {0x06, 0x0017},
+ [9] = {0x09, 0x724c},
+ [10] = {0x0a, 0xb610},
+ [11] = {0x0b, 0xb90d},
+ [13] = {0x0d, 0xef2a},
+ [15] = {0x0f, 0x9050},
+ [16] = {0x10, 0x000c},
+ [32] = {0x20, 0x70ff},
+ [34] = {0x22, 0x0013},
+ [35] = {0x23, 0xdb66},
+ [38] = {0x26, 0x8609},
+ [41] = {0x29, 0xff13},
+ [42] = {0x2a, 0x3070}, },
+ .check_efuse = true,
+ .do_toggle = false,
+ .do_toggle_once = true,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1319d_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [1] = {0x01, 0xac89},
+ [4] = {0x04, 0xf2f5},
+ [6] = {0x06, 0x0017},
+ [9] = {0x09, 0x424c},
+ [10] = {0x0a, 0x9610},
+ [11] = {0x0b, 0x9901},
+ [12] = {0x0c, 0xf000},
+ [13] = {0x0d, 0xef2a},
+ [14] = {0x0e, 0x1000},
+ [15] = {0x0f, 0x9050},
+ [32] = {0x20, 0x7077},
+ [35] = {0x23, 0x0b62},
+ [37] = {0x25, 0x10ec},
+ [42] = {0x2a, 0x3070}, },
+ .check_efuse = true,
+ .do_toggle = false,
+ .do_toggle_once = true,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = true,
+};
+
+static const struct of_device_id usbphy_rtk_dt_match[] = {
+ { .compatible = "realtek,rtd1295-usb3phy", .data = &rtd1295_phy_cfg },
+ { .compatible = "realtek,rtd1319-usb3phy", .data = &rtd1319_phy_cfg },
+ { .compatible = "realtek,rtd1319d-usb3phy", .data = &rtd1319d_phy_cfg },
+ { .compatible = "realtek,rtd1619-usb3phy", .data = &rtd1619_phy_cfg },
+ { .compatible = "realtek,rtd1619b-usb3phy", .data = &rtd1619b_phy_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
+
+static struct platform_driver rtk_usb3phy_driver = {
+ .probe = rtk_usb3phy_probe,
+ .remove_new = rtk_usb3phy_remove,
+ .driver = {
+ .name = "rtk-usb3phy",
+ .of_match_table = usbphy_rtk_dt_match,
+ },
+};
+
+module_platform_driver(rtk_usb3phy_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
+MODULE_DESCRIPTION("Realtek USB 3.0 PHY driver");
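The driver's parameter table above treats an entry whose addr and data are both zero as "not set"; update_amplitude_control_value() seeds such an entry from the live register before patching bits into it. A minimal sketch of that pattern (the helper name is illustrative, not part of the patch):

	static u16 get_or_seed_param(struct phy_cfg *cfg, struct phy_reg *reg, u8 addr)
	{
		struct phy_data *p = &cfg->param[addr];

		/* Entry never populated: seed it from the hardware value */
		if (!p->addr && !p->data) {
			p->addr = addr;
			p->data = rtk_phy_read(reg, addr);
		}

		return p->data;
	}

The masked fields (amplitude control coarse/fine, the eFuse LFPS swing trim) are then OR-ed into the returned value and written back to cfg->param[addr].data, exactly as the PHY_ADDR_0X20 and PHY_ADDR_0X21 branches do above.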
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 0dc86a7740e3..cfdb54b6070a 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -1531,6 +1531,19 @@ int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_usb3_companion);
+int tegra_xusb_padctl_get_port_number(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane;
+
+ if (!phy)
+ return -ENODEV;
+
+ lane = phy_get_drvdata(phy);
+
+ return lane->index;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_port_number);
+
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
MODULE_LICENSE("GPL v2");
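The new helper lets a USB device-controller driver map a USB 3 PHY back to its XUSB port number. A hedged usage sketch, assuming a probe path that already obtained the PHY and has dev in scope:

	/* Sketch only: resolve which USB3 port this PHY belongs to. */
	int port = tegra_xusb_padctl_get_port_number(phy);

	if (port < 0) {
		dev_err(dev, "failed to get usb3 port number: %d\n", port);
		return port;
	}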
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 2b2f14a1b711..4d305876ec08 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -24,6 +24,23 @@
#define DP_PORT_VDO (DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D)) | \
DP_CAP_DFP_D | DP_CAP_RECEPTACLE)
+static void cros_typec_role_switch_quirk(struct fwnode_handle *fwnode)
+{
+#ifdef CONFIG_ACPI
+ struct fwnode_handle *switch_fwnode;
+
+ /* Supply the USB role switch with the correct pld_crc if it's missing. */
+ switch_fwnode = fwnode_find_reference(fwnode, "usb-role-switch", 0);
+ if (!IS_ERR_OR_NULL(switch_fwnode)) {
+ struct acpi_device *adev = to_acpi_device_node(switch_fwnode);
+
+ if (adev && !adev->pld_crc)
+ adev->pld_crc = to_acpi_device_node(fwnode)->pld_crc;
+ fwnode_handle_put(switch_fwnode);
+ }
+#endif
+}
+
static int cros_typec_parse_port_props(struct typec_capability *cap,
struct fwnode_handle *fwnode,
struct device *dev)
@@ -66,6 +83,8 @@ static int cros_typec_parse_port_props(struct typec_capability *cap,
cap->prefer_role = ret;
}
+ cros_typec_role_switch_quirk(fwnode);
+
cap->fwnode = fwnode;
return 0;
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index c8b3d7b78098..b44b32dcb832 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I$(src)
obj-${CONFIG_USB4} := thunderbolt.o
thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index d997a4c545f7..4bdb2d45e0bf 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -15,6 +15,8 @@
#include "ctl.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
#define TB_CTL_RX_PKG_COUNT 10
#define TB_CTL_RETRIES 4
@@ -32,6 +34,7 @@
* @timeout_msec: Default timeout for non-raw control messages
* @callback: Callback called when hotplug message is received
* @callback_data: Data passed to @callback
+ * @index: Domain number. This will be output with the trace record.
*/
struct tb_ctl {
struct tb_nhi *nhi;
@@ -47,6 +50,8 @@ struct tb_ctl {
int timeout_msec;
event_cb callback;
void *callback_data;
+
+ int index;
};
@@ -369,6 +374,9 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
pkg->frame.size = len + 4;
pkg->frame.sof = type;
pkg->frame.eof = type;
+
+ trace_tb_tx(ctl->index, type, data, len);
+
cpu_to_be32_array(pkg->buffer, data, len / 4);
*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
@@ -384,6 +392,7 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
struct ctl_pkg *pkg, size_t size)
{
+ trace_tb_event(ctl->index, type, pkg->buffer, size);
return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}
@@ -489,6 +498,9 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
* triggered from messing with the active requests.
*/
req = tb_cfg_request_find(pkg->ctl, pkg);
+
+ trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req);
+
if (req) {
if (req->copy(req, pkg))
schedule_work(&req->work);
@@ -614,6 +626,7 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
/**
* tb_ctl_alloc() - allocate a control channel
* @nhi: Pointer to NHI
+ * @index: Domain number
* @timeout_msec: Default timeout used with non-raw control messages
* @cb: Callback called for plug events
* @cb_data: Data passed to @cb
@@ -622,14 +635,16 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
*
* Return: Returns a pointer on success or NULL on failure.
*/
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
- void *cb_data)
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
+ event_cb cb, void *cb_data)
{
int i;
struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
if (!ctl)
return NULL;
+
ctl->nhi = nhi;
+ ctl->index = index;
ctl->timeout_msec = timeout_msec;
ctl->callback = cb;
ctl->callback_data = cb_data;
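The trace_tb_tx()/trace_tb_rx()/trace_tb_event() calls added above come from the new drivers/thunderbolt/trace.h, which is not part of this excerpt. For orientation, a tracepoint taking that argument list would be declared roughly as follows; this is an illustrative sketch, not the file's actual contents, and the usual TRACE_SYSTEM/define_trace.h boilerplate is omitted:

	#include <linux/tracepoint.h>

	TRACE_EVENT(tb_tx,
		TP_PROTO(int index, u32 type, const void *data, size_t size),
		TP_ARGS(index, type, data, size),
		TP_STRUCT__entry(
			__field(int, index)
			__field(u32, type)
			__dynamic_array(u32, data, size / 4)
		),
		TP_fast_assign(
			__entry->index = index;
			__entry->type = type;
			memcpy(__get_dynamic_array(data), data, size);
		),
		TP_printk("index=%d type=%#x", __entry->index, __entry->type)
	);

The #define CREATE_TRACE_POINTS before the include in ctl.c, together with the -I$(src) added to the Thunderbolt Makefile, is the standard way to instantiate tracepoints from a driver-local header.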
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index eec5c953c743..bf930a191472 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -21,8 +21,8 @@ struct tb_ctl;
typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type,
const void *buf, size_t size);
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
- void *cb_data);
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
+ event_cb cb, void *cb_data);
void tb_ctl_start(struct tb_ctl *ctl);
void tb_ctl_stop(struct tb_ctl *ctl);
void tb_ctl_free(struct tb_ctl *ctl);
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 9fb1a64f3300..0023017299f7 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -321,12 +321,12 @@ static void tb_domain_release(struct device *dev)
tb_ctl_free(tb->ctl);
destroy_workqueue(tb->wq);
- ida_simple_remove(&tb_domain_ida, tb->index);
+ ida_free(&tb_domain_ida, tb->index);
mutex_destroy(&tb->lock);
kfree(tb);
}
-struct device_type tb_domain_type = {
+const struct device_type tb_domain_type = {
.name = "thunderbolt_domain",
.release = tb_domain_release,
};
@@ -389,7 +389,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
tb->nhi = nhi;
mutex_init(&tb->lock);
- tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
+ tb->index = ida_alloc(&tb_domain_ida, GFP_KERNEL);
if (tb->index < 0)
goto err_free;
@@ -397,7 +397,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
if (!tb->wq)
goto err_remove_ida;
- tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb);
+ tb->ctl = tb_ctl_alloc(nhi, tb->index, timeout_msec, tb_domain_event_cb, tb);
if (!tb->ctl)
goto err_destroy_wq;
@@ -413,7 +413,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
err_destroy_wq:
destroy_workqueue(tb->wq);
err_remove_ida:
- ida_simple_remove(&tb_domain_ida, tb->index);
+ ida_free(&tb_domain_ida, tb->index);
err_free:
kfree(tb);
@@ -423,6 +423,7 @@ err_free:
/**
* tb_domain_add() - Add domain to the system
* @tb: Domain to add
+ * @reset: Issue reset to the host router
*
* Starts the domain and adds it to the system. Hotplugging devices will
 * work after this has returned successfully. In order to remove
@@ -431,7 +432,7 @@ err_free:
*
* Return: %0 in case of success and negative errno in case of error
*/
-int tb_domain_add(struct tb *tb)
+int tb_domain_add(struct tb *tb, bool reset)
{
int ret;
@@ -460,7 +461,7 @@ int tb_domain_add(struct tb *tb)
/* Start the domain */
if (tb->cm_ops->start) {
- ret = tb->cm_ops->start(tb);
+ ret = tb->cm_ops->start(tb, reset);
if (ret)
goto err_domain_del;
}
@@ -505,6 +506,10 @@ void tb_domain_remove(struct tb *tb)
mutex_unlock(&tb->lock);
flush_workqueue(tb->wq);
+
+ if (tb->cm_ops->deinit)
+ tb->cm_ops->deinit(tb);
+
device_unregister(&tb->dev);
}
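Both changes in this file lean on struct tb_cm_ops: ->start() gains the reset flag that tb_domain_add() now forwards, and tb_domain_remove() calls the optional ->deinit() hook before unregistering the device. A sketch of the relevant members as assumed here (the full structure lives in tb.h, outside this excerpt):

	struct tb_cm_ops {
		int (*start)(struct tb *tb, bool reset);	/* may reset the host router first */
		void (*deinit)(struct tb *tb);			/* teardown before device_unregister() */
		/* ... remaining callbacks unchanged ... */
	};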
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 56790d50f9e3..baf10d099c77 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2144,7 +2144,7 @@ static int icm_runtime_resume(struct tb *tb)
return 0;
}
-static int icm_start(struct tb *tb)
+static int icm_start(struct tb *tb, bool not_used)
{
struct icm *icm = tb_priv(tb);
int ret;
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index 633970fbe9b0..63cb4b6afb71 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -6,6 +6,8 @@
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/delay.h>
+
#include "tb.h"
/**
@@ -45,6 +47,49 @@ static int find_port_lc_cap(struct tb_port *port)
return sw->cap_lc + start + phys * size;
}
+/**
+ * tb_lc_reset_port() - Trigger downstream port reset through LC
+ * @port: Port that is reset
+ *
+ * Triggers downstream port reset through link controller registers.
+ * Returns %0 in case of success negative errno otherwise. Only supports
+ * non-USB4 routers with link controller (that's Thunderbolt 2 and
+ * Thunderbolt 3).
+ */
+int tb_lc_reset_port(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+ int cap, ret;
+ u32 mode;
+
+ if (sw->generation < 2)
+ return -EINVAL;
+
+ cap = find_port_lc_cap(port);
+ if (cap < 0)
+ return cap;
+
+ ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ mode |= TB_LC_PORT_MODE_DPR;
+
+ ret = tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ fsleep(10000);
+
+ ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ mode &= ~TB_LC_PORT_MODE_DPR;
+
+ return tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+}
+
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
bool upstream = tb_is_upstream_port(port);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index fb4f46e51753..7af2642b97cb 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -48,7 +48,7 @@
static bool host_reset = true;
module_param(host_reset, bool, 0444);
-MODULE_PARM_DESC(host_reset, "reset USBv2 host router (default: true)");
+MODULE_PARM_DESC(host_reset, "reset USB4 host router (default: true)");
static int ring_interrupt_index(const struct tb_ring *ring)
{
@@ -465,7 +465,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
if (!nhi->pdev->msix_enabled)
return 0;
- ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
+ ret = ida_alloc_max(&nhi->msix_ida, MSIX_MAX_VECS - 1, GFP_KERNEL);
if (ret < 0)
return ret;
@@ -485,7 +485,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
return 0;
err_ida_remove:
- ida_simple_remove(&nhi->msix_ida, ring->vector);
+ ida_free(&nhi->msix_ida, ring->vector);
return ret;
}
@@ -496,7 +496,7 @@ static void ring_release_msix(struct tb_ring *ring)
return;
free_irq(ring->irq, ring);
- ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
+ ida_free(&ring->nhi->msix_ida, ring->vector);
ring->vector = 0;
ring->irq = 0;
}
@@ -1364,7 +1364,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_quirks(nhi);
nhi_check_iommu(nhi);
-
nhi_reset(nhi);
res = nhi_init_msi(nhi);
@@ -1392,7 +1391,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
- res = tb_domain_add(tb);
+ res = tb_domain_add(tb, host_reset);
if (res) {
/*
* At this point the RX/TX rings might already have been
diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c
index 69fb3b0fa34f..8901db2de327 100644
--- a/drivers/thunderbolt/nvm.c
+++ b/drivers/thunderbolt/nvm.c
@@ -330,7 +330,7 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev)
if (!nvm)
return ERR_PTR(-ENOMEM);
- ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&nvm_ida, GFP_KERNEL);
if (ret < 0) {
kfree(nvm);
return ERR_PTR(ret);
@@ -528,7 +528,7 @@ void tb_nvm_free(struct tb_nvm *nvm)
nvmem_unregister(nvm->non_active);
nvmem_unregister(nvm->active);
vfree(nvm->buf);
- ida_simple_remove(&nvm_ida, nvm->id);
+ ida_free(&nvm_ida, nvm->id);
}
kfree(nvm);
}
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index 091a81bbdbdc..f760e54cd9bd 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -446,6 +446,19 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
return -ETIMEDOUT;
}
+/**
+ * tb_path_deactivate_hop() - Deactivate one path in path config space
+ * @port: Lane or protocol adapter
+ * @hop_index: HopID of the path to be cleared
+ *
+ * This deactivates or clears a single path config space entry at
+ * @hop_index. Returns %0 on success and negative errno otherwise.
+ */
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index)
+{
+ return __tb_path_deactivate_hop(port, hop_index, true);
+}
+
static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
int i, res;
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
index e6bfa63b40ae..e81de9c30eac 100644
--- a/drivers/thunderbolt/quirks.c
+++ b/drivers/thunderbolt/quirks.c
@@ -43,6 +43,12 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
}
}
+static void quirk_block_rpm_in_redrive(struct tb_switch *sw)
+{
+ sw->quirks |= QUIRK_KEEP_POWER_IN_DP_REDRIVE;
+ tb_sw_dbg(sw, "preventing runtime PM in DP redrive mode\n");
+}
+
struct tb_quirk {
u16 hw_vendor_id;
u16 hw_device_id;
@@ -87,6 +93,14 @@ static const struct tb_quirk tb_quirks[] = {
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
/*
+ * Block Runtime PM in DP redrive mode for Intel Barlow Ridge host
+ * controllers.
+ */
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
+ quirk_block_rpm_in_redrive },
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
+ quirk_block_rpm_in_redrive },
+ /*
* CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
*/
{ 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index d49d6628dbf2..6bb49bdcd6c1 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -356,7 +356,7 @@ static void tb_retimer_release(struct device *dev)
kfree(rt);
}
-struct device_type tb_retimer_type = {
+const struct device_type tb_retimer_type = {
.name = "thunderbolt_retimer",
.groups = retimer_groups,
.release = tb_retimer_release,
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index fad40c4bc710..6ffc4e81ffed 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -676,6 +676,13 @@ int tb_port_disable(struct tb_port *port)
return __tb_port_enable(port, false);
}
+static int tb_port_reset(struct tb_port *port)
+{
+ if (tb_switch_is_usb4(port->sw))
+ return port->cap_usb4 ? usb4_port_reset(port) : 0;
+ return tb_lc_reset_port(port);
+}
+
/*
* tb_init_port() - initialize a port
*
@@ -771,7 +778,7 @@ static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
if (max_hopid < 0 || max_hopid > port_max_hopid)
max_hopid = port_max_hopid;
- return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
+ return ida_alloc_range(ida, min_hopid, max_hopid, GFP_KERNEL);
}
/**
@@ -809,7 +816,7 @@ int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
*/
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
- ida_simple_remove(&port->in_hopids, hopid);
+ ida_free(&port->in_hopids, hopid);
}
/**
@@ -819,7 +826,7 @@ void tb_port_release_in_hopid(struct tb_port *port, int hopid)
*/
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
- ida_simple_remove(&port->out_hopids, hopid);
+ ida_free(&port->out_hopids, hopid);
}
static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
@@ -1120,7 +1127,7 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
ret = tb_port_set_link_width(port->dual_link_port,
TB_LINK_WIDTH_DUAL);
if (ret)
- goto err_lane0;
+ goto err_lane1;
}
/*
@@ -1534,29 +1541,124 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
regs->__unknown1, regs->__unknown4);
}
+static int tb_switch_reset_host(struct tb_switch *sw)
+{
+ if (sw->generation > 1) {
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ int i, ret;
+
+ /*
+ * For lane adapters we issue downstream port
+ * reset and clear up path config spaces.
+ *
+ * For protocol adapters we disable the path and
+ * clear path config space one by one (from 8 to
+ * Max Input HopID of the adapter).
+ */
+ if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
+ ret = tb_port_reset(port);
+ if (ret)
+ return ret;
+ } else if (tb_port_is_usb3_down(port) ||
+ tb_port_is_usb3_up(port)) {
+ tb_usb3_port_enable(port, false);
+ } else if (tb_port_is_dpin(port) ||
+ tb_port_is_dpout(port)) {
+ tb_dp_port_enable(port, false);
+ } else if (tb_port_is_pcie_down(port) ||
+ tb_port_is_pcie_up(port)) {
+ tb_pci_port_enable(port, false);
+ } else {
+ continue;
+ }
+
+ /* Cleanup path config space of protocol adapter */
+ for (i = TB_PATH_MIN_HOPID;
+ i <= port->config.max_in_hop_id; i++) {
+ ret = tb_path_deactivate_hop(port, i);
+ if (ret)
+ return ret;
+ }
+ }
+ } else {
+ struct tb_cfg_result res;
+
+ /* Thunderbolt 1 uses the "reset" config space packet */
+ res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
+ TB_CFG_SWITCH, 2, 2);
+ if (res.err)
+ return res.err;
+ res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
+ if (res.err > 0)
+ return -EIO;
+ else if (res.err < 0)
+ return res.err;
+ }
+
+ return 0;
+}
+
+static int tb_switch_reset_device(struct tb_switch *sw)
+{
+ return tb_port_reset(tb_switch_downstream_port(sw));
+}
+
+static bool tb_switch_enumerated(struct tb_switch *sw)
+{
+ u32 val;
+ int ret;
+
+ /*
+ * Read directly from the hardware because we use this also
+ * during system sleep where sw->config.enabled is already set
+ * by us.
+ */
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
+ if (ret)
+ return false;
+
+ return !!(val & ROUTER_CS_3_V);
+}
+
/**
- * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
- * @sw: Switch to reset
+ * tb_switch_reset() - Perform reset to the router
+ * @sw: Router to reset
*
- * Return: Returns 0 on success or an error code on failure.
+ * Issues reset to the router @sw. Can be used for any router. For host
+ * routers, resets all the downstream ports and cleans up path config
+ * spaces accordingly. For device routers, issues a downstream port reset
+ * through the parent router, so as a side effect there will be an unplug
+ * soon after this is finished.
+ *
+ * If the router is not enumerated, this does nothing.
+ *
+ * Returns %0 on success or negative errno in case of failure.
*/
int tb_switch_reset(struct tb_switch *sw)
{
- struct tb_cfg_result res;
+ int ret;
- if (sw->generation > 1)
+ /*
+ * We cannot access the port config spaces unless the router is
+ * already enumerated. If the router is not enumerated it is
+ * equal to being reset so we can skip that here.
+ */
+ if (!tb_switch_enumerated(sw))
return 0;
- tb_sw_dbg(sw, "resetting switch\n");
+ tb_sw_dbg(sw, "resetting\n");
- res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
- TB_CFG_SWITCH, 2, 2);
- if (res.err)
- return res.err;
- res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
- if (res.err > 0)
- return -EIO;
- return res.err;
+ if (tb_route(sw))
+ ret = tb_switch_reset_device(sw);
+ else
+ ret = tb_switch_reset_host(sw);
+
+ if (ret)
+ tb_sw_warn(sw, "failed to reset\n");
+
+ return ret;
}
/**
@@ -2228,7 +2330,7 @@ static const struct dev_pm_ops tb_switch_pm_ops = {
NULL)
};
-struct device_type tb_switch_type = {
+const struct device_type tb_switch_type = {
.name = "thunderbolt_device",
.release = tb_switch_release,
.uevent = tb_switch_uevent,
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 846d2813bb1a..c5ce7a694b27 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -17,6 +17,7 @@
#include "tunnel.h"
#define TB_TIMEOUT 100 /* ms */
+#define TB_RELEASE_BW_TIMEOUT 10000 /* ms */
/*
* Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
@@ -75,112 +76,6 @@ struct tb_hotplug_event {
bool unplug;
};
-static void tb_init_bandwidth_groups(struct tb_cm *tcm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- group->tb = tcm_to_tb(tcm);
- group->index = i + 1;
- INIT_LIST_HEAD(&group->ports);
- }
-}
-
-static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
- struct tb_port *in)
-{
- if (!group || WARN_ON(in->group))
- return;
-
- in->group = group;
- list_add_tail(&in->group_list, &group->ports);
-
- tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
-}
-
-static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- if (list_empty(&group->ports))
- return group;
- }
-
- return NULL;
-}
-
-static struct tb_bandwidth_group *
-tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
- struct tb_port *out)
-{
- struct tb_bandwidth_group *group;
- struct tb_tunnel *tunnel;
-
- /*
- * Find all DP tunnels that go through all the same USB4 links
- * as this one. Because we always setup tunnels the same way we
- * can just check for the routers at both ends of the tunnels
- * and if they are the same we have a match.
- */
- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
- if (!tb_tunnel_is_dp(tunnel))
- continue;
-
- if (tunnel->src_port->sw == in->sw &&
- tunnel->dst_port->sw == out->sw) {
- group = tunnel->src_port->group;
- if (group) {
- tb_bandwidth_group_attach_port(group, in);
- return group;
- }
- }
- }
-
- /* Pick up next available group then */
- group = tb_find_free_bandwidth_group(tcm);
- if (group)
- tb_bandwidth_group_attach_port(group, in);
- else
- tb_port_warn(in, "no available bandwidth groups\n");
-
- return group;
-}
-
-static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
- struct tb_port *out)
-{
- if (usb4_dp_port_bandwidth_mode_enabled(in)) {
- int index, i;
-
- index = usb4_dp_port_group_id(in);
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- if (tcm->groups[i].index == index) {
- tb_bandwidth_group_attach_port(&tcm->groups[i], in);
- return;
- }
- }
- }
-
- tb_attach_bandwidth_group(tcm, in, out);
-}
-
-static void tb_detach_bandwidth_group(struct tb_port *in)
-{
- struct tb_bandwidth_group *group = in->group;
-
- if (group) {
- in->group = NULL;
- list_del_init(&in->group_list);
-
- tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
- }
-}
-
static void tb_handle_hotplug(struct work_struct *work);
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
@@ -472,34 +367,6 @@ static void tb_switch_discover_tunnels(struct tb_switch *sw,
}
}
-static void tb_discover_tunnels(struct tb *tb)
-{
- struct tb_cm *tcm = tb_priv(tb);
- struct tb_tunnel *tunnel;
-
- tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
-
- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
- if (tb_tunnel_is_pci(tunnel)) {
- struct tb_switch *parent = tunnel->dst_port->sw;
-
- while (parent != tunnel->src_port->sw) {
- parent->boot = true;
- parent = tb_switch_parent(parent);
- }
- } else if (tb_tunnel_is_dp(tunnel)) {
- struct tb_port *in = tunnel->src_port;
- struct tb_port *out = tunnel->dst_port;
-
- /* Keep the domain from powering down */
- pm_runtime_get_sync(&in->sw->dev);
- pm_runtime_get_sync(&out->sw->dev);
-
- tb_discover_bandwidth_group(tcm, in, out);
- }
- }
-}
-
static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
if (tb_switch_is_usb4(port->sw))
@@ -681,6 +548,10 @@ static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
 * Calculates consumed DP bandwidth on the path from @src_port to
 * @dst_port. Does not take the tunnel starting from @src_port and
 * ending at @dst_port into account.
+ *
+ * If there is bandwidth reserved for any of the groups between
+ * @src_port and @dst_port (but not yet used) that is also taken into
+ * account in the returned consumed bandwidth.
*/
static int tb_consumed_dp_bandwidth(struct tb *tb,
struct tb_port *src_port,
@@ -689,9 +560,11 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
int *consumed_up,
int *consumed_down)
{
+ int group_reserved[MAX_GROUPS] = {};
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
- int ret;
+ bool downstream;
+ int i, ret;
*consumed_up = *consumed_down = 0;
@@ -700,6 +573,7 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
* their consumed bandwidth from the available.
*/
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ const struct tb_bandwidth_group *group;
int dp_consumed_up, dp_consumed_down;
if (tb_tunnel_is_invalid(tunnel))
@@ -712,6 +586,15 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
continue;
/*
+ * Calculate what is reserved for groups crossing the
+ * same ports only once (as that is reserved for all the
+ * tunnels in the group).
+ */
+ group = tunnel->src_port->group;
+ if (group && group->reserved && !group_reserved[group->index])
+ group_reserved[group->index] = group->reserved;
+
+ /*
* Ignore the DP tunnel between src_port and dst_port
* because it is the same tunnel and we may be
* re-calculating estimated bandwidth.
@@ -729,6 +612,14 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
*consumed_down += dp_consumed_down;
}
+ downstream = tb_port_path_direction_downstream(src_port, dst_port);
+ for (i = 0; i < ARRAY_SIZE(group_reserved); i++) {
+ if (downstream)
+ *consumed_down += group_reserved[i];
+ else
+ *consumed_up += group_reserved[i];
+ }
+
return 0;
}
@@ -1181,8 +1072,6 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
* @tb: Domain structure
* @src_port: Source adapter to start the transition
* @dst_port: Destination adapter
- * @requested_up: New lower bandwidth request upstream (Mb/s)
- * @requested_down: New lower bandwidth request downstream (Mb/s)
* @keep_asym: Keep asymmetric link if preferred
*
* Goes over each link from @src_port to @dst_port and tries to
@@ -1190,8 +1079,7 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
* allows and link asymmetric preference is ignored (if @keep_asym is %false).
*/
static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
- struct tb_port *dst_port, int requested_up,
- int requested_down, bool keep_asym)
+ struct tb_port *dst_port, bool keep_asym)
{
bool clx = false, clx_disabled = false, downstream;
struct tb_switch *sw;
@@ -1230,10 +1118,10 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
* guard band 10%) as the link was configured asymmetric
* already.
*/
- if (consumed_down + requested_down >= asym_threshold)
+ if (consumed_down >= asym_threshold)
continue;
} else {
- if (consumed_up + requested_up >= asym_threshold)
+ if (consumed_up >= asym_threshold)
continue;
}
@@ -1306,7 +1194,7 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up,
struct tb_port *host_port;
host_port = tb_port_at(tb_route(sw), tb->root_switch);
- tb_configure_sym(tb, host_port, up, 0, 0, false);
+ tb_configure_sym(tb, host_port, up, false);
}
/* Set the link configured */
@@ -1464,6 +1352,297 @@ out_rpm_put:
}
}
+static void
+tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
+{
+ struct tb_tunnel *first_tunnel;
+ struct tb *tb = group->tb;
+ struct tb_port *in;
+ int ret;
+
+ tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
+ group->index);
+
+ first_tunnel = NULL;
+ list_for_each_entry(in, &group->ports, group_list) {
+ int estimated_bw, estimated_up, estimated_down;
+ struct tb_tunnel *tunnel;
+ struct tb_port *out;
+
+ if (!usb4_dp_port_bandwidth_mode_enabled(in))
+ continue;
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (WARN_ON(!tunnel))
+ break;
+
+ if (!first_tunnel) {
+ /*
+ * Since USB3 bandwidth is shared by all DP
+ * tunnels under the host router USB4 port, even
+ * if they do not begin from the host router, we
+ * can release USB3 bandwidth just once and not
+ * for each tunnel separately.
+ */
+ first_tunnel = tunnel;
+ ret = tb_release_unused_usb3_bandwidth(tb,
+ first_tunnel->src_port, first_tunnel->dst_port);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to release unused bandwidth\n");
+ break;
+ }
+ }
+
+ out = tunnel->dst_port;
+ ret = tb_available_bandwidth(tb, in, out, &estimated_up,
+ &estimated_down, true);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to re-calculate estimated bandwidth\n");
+ break;
+ }
+
+ /*
+ * Estimated bandwidth includes:
+ * - already allocated bandwidth for the DP tunnel
+ * - available bandwidth along the path
+ * - bandwidth allocated for USB 3.x but not used.
+ */
+ if (tb_tunnel_direction_downstream(tunnel))
+ estimated_bw = estimated_down;
+ else
+ estimated_bw = estimated_up;
+
+ /*
+ * If there is reserved bandwidth for the group that is
+ * not yet released we report that too.
+ */
+ tb_tunnel_dbg(tunnel,
+ "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n",
+ estimated_bw, group->reserved,
+ estimated_bw + group->reserved);
+
+ if (usb4_dp_port_set_estimated_bandwidth(in,
+ estimated_bw + group->reserved))
+ tb_tunnel_warn(tunnel,
+ "failed to update estimated bandwidth\n");
+ }
+
+ if (first_tunnel)
+ tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
+ first_tunnel->dst_port);
+
+ tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
+}
+
+static void tb_recalc_estimated_bandwidth(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int i;
+
+ tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (!list_empty(&group->ports))
+ tb_recalc_estimated_bandwidth_for_group(group);
+ }
+
+ tb_dbg(tb, "bandwidth re-calculation done\n");
+}
+
+static bool __release_group_bandwidth(struct tb_bandwidth_group *group)
+{
+ if (group->reserved) {
+ tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index,
+ group->reserved);
+ group->reserved = 0;
+ return true;
+ }
+ return false;
+}
+
+static void __configure_group_sym(struct tb_bandwidth_group *group)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_port *in;
+
+ if (list_empty(&group->ports))
+ return;
+
+ /*
+ * All the tunnels in the group go through the same USB4 links
+ * so we find the first one here and pass the IN and OUT
+ * adapters to tb_configure_sym() which now transitions the
+ * links back to symmetric if bandwidth requirement < asym_threshold.
+ *
+ * We do this here to avoid unnecessary transitions (for example
+ * if the graphics released bandwidth for another tunnel in the
+ * same group).
+ */
+ in = list_first_entry(&group->ports, struct tb_port, group_list);
+ tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL);
+ if (tunnel)
+ tb_configure_sym(group->tb, in, tunnel->dst_port, true);
+}
+
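+/*
+ * Worked example of the reservation lifecycle (illustrative numbers): a
+ * DP tunnel had 2700 Mb/s allocated downstream and the DPTX now requests
+ * only 1800 Mb/s. tb_alloc_dp_bandwidth() keeps the 900 Mb/s difference
+ * in group->reserved and (re)arms release_work for TB_RELEASE_BW_TIMEOUT.
+ * If nothing in the group claims it back within that window, the work
+ * below drops the reservation, re-runs the bandwidth estimation and lets
+ * the links transition back to symmetric if they can.
+ */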
+static void tb_bandwidth_group_release_work(struct work_struct *work)
+{
+ struct tb_bandwidth_group *group =
+ container_of(work, typeof(*group), release_work.work);
+ struct tb *tb = group->tb;
+
+ mutex_lock(&tb->lock);
+ if (__release_group_bandwidth(group))
+ tb_recalc_estimated_bandwidth(tb);
+ __configure_group_sym(group);
+ mutex_unlock(&tb->lock);
+}
+
+static void tb_init_bandwidth_groups(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ group->tb = tcm_to_tb(tcm);
+ group->index = i + 1;
+ INIT_LIST_HEAD(&group->ports);
+ INIT_DELAYED_WORK(&group->release_work,
+ tb_bandwidth_group_release_work);
+ }
+}
+
+static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
+ struct tb_port *in)
+{
+ if (!group || WARN_ON(in->group))
+ return;
+
+ in->group = group;
+ list_add_tail(&in->group_list, &group->ports);
+
+ tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
+}
+
+static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (list_empty(&group->ports))
+ return group;
+ }
+
+ return NULL;
+}
+
+static struct tb_bandwidth_group *
+tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ struct tb_bandwidth_group *group;
+ struct tb_tunnel *tunnel;
+
+ /*
+ * Find all DP tunnels that go through all the same USB4 links
+ * as this one. Because we always setup tunnels the same way we
+ * can just check for the routers at both ends of the tunnels
+ * and if they are the same we have a match.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (!tb_tunnel_is_dp(tunnel))
+ continue;
+
+ if (tunnel->src_port->sw == in->sw &&
+ tunnel->dst_port->sw == out->sw) {
+ group = tunnel->src_port->group;
+ if (group) {
+ tb_bandwidth_group_attach_port(group, in);
+ return group;
+ }
+ }
+ }
+
+ /* Pick up next available group then */
+ group = tb_find_free_bandwidth_group(tcm);
+ if (group)
+ tb_bandwidth_group_attach_port(group, in);
+ else
+ tb_port_warn(in, "no available bandwidth groups\n");
+
+ return group;
+}
+
+static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ if (usb4_dp_port_bandwidth_mode_enabled(in)) {
+ int index, i;
+
+ index = usb4_dp_port_group_id(in);
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ if (tcm->groups[i].index == index) {
+ tb_bandwidth_group_attach_port(&tcm->groups[i], in);
+ return;
+ }
+ }
+ }
+
+ tb_attach_bandwidth_group(tcm, in, out);
+}
+
+static void tb_detach_bandwidth_group(struct tb_port *in)
+{
+ struct tb_bandwidth_group *group = in->group;
+
+ if (group) {
+ in->group = NULL;
+ list_del_init(&in->group_list);
+
+ tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
+
+ /* No more tunnels so release the reserved bandwidth if any */
+ if (list_empty(&group->ports)) {
+ cancel_delayed_work(&group->release_work);
+ __release_group_bandwidth(group);
+ }
+ }
+}
+
+static void tb_discover_tunnels(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
+
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_pci(tunnel)) {
+ struct tb_switch *parent = tunnel->dst_port->sw;
+
+ while (parent != tunnel->src_port->sw) {
+ parent->boot = true;
+ parent = tb_switch_parent(parent);
+ }
+ } else if (tb_tunnel_is_dp(tunnel)) {
+ struct tb_port *in = tunnel->src_port;
+ struct tb_port *out = tunnel->dst_port;
+
+ /* Keep the domain from powering down */
+ pm_runtime_get_sync(&in->sw->dev);
+ pm_runtime_get_sync(&out->sw->dev);
+
+ tb_discover_bandwidth_group(tcm, in, out);
+ }
+ }
+}
+
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
struct tb_port *src_port, *dst_port;
@@ -1491,7 +1670,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
* If bandwidth on a link is < asym_threshold
* transition the link to symmetric.
*/
- tb_configure_sym(tb, src_port, dst_port, 0, 0, true);
+ tb_configure_sym(tb, src_port, dst_port, true);
/* Now we can allow the domain to runtime suspend again */
pm_runtime_mark_last_busy(&dst_port->sw->dev);
pm_runtime_put_autosuspend(&dst_port->sw->dev);
@@ -1605,101 +1784,6 @@ out:
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
-static void
-tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
-{
- struct tb_tunnel *first_tunnel;
- struct tb *tb = group->tb;
- struct tb_port *in;
- int ret;
-
- tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
- group->index);
-
- first_tunnel = NULL;
- list_for_each_entry(in, &group->ports, group_list) {
- int estimated_bw, estimated_up, estimated_down;
- struct tb_tunnel *tunnel;
- struct tb_port *out;
-
- if (!usb4_dp_port_bandwidth_mode_enabled(in))
- continue;
-
- tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
- if (WARN_ON(!tunnel))
- break;
-
- if (!first_tunnel) {
- /*
- * Since USB3 bandwidth is shared by all DP
- * tunnels under the host router USB4 port, even
- * if they do not begin from the host router, we
- * can release USB3 bandwidth just once and not
- * for each tunnel separately.
- */
- first_tunnel = tunnel;
- ret = tb_release_unused_usb3_bandwidth(tb,
- first_tunnel->src_port, first_tunnel->dst_port);
- if (ret) {
- tb_tunnel_warn(tunnel,
- "failed to release unused bandwidth\n");
- break;
- }
- }
-
- out = tunnel->dst_port;
- ret = tb_available_bandwidth(tb, in, out, &estimated_up,
- &estimated_down, true);
- if (ret) {
- tb_tunnel_warn(tunnel,
- "failed to re-calculate estimated bandwidth\n");
- break;
- }
-
- /*
- * Estimated bandwidth includes:
- * - already allocated bandwidth for the DP tunnel
- * - available bandwidth along the path
- * - bandwidth allocated for USB 3.x but not used.
- */
- tb_tunnel_dbg(tunnel,
- "re-calculated estimated bandwidth %u/%u Mb/s\n",
- estimated_up, estimated_down);
-
- if (tb_port_path_direction_downstream(in, out))
- estimated_bw = estimated_down;
- else
- estimated_bw = estimated_up;
-
- if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
- tb_tunnel_warn(tunnel,
- "failed to update estimated bandwidth\n");
- }
-
- if (first_tunnel)
- tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
- first_tunnel->dst_port);
-
- tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
-}
-
-static void tb_recalc_estimated_bandwidth(struct tb *tb)
-{
- struct tb_cm *tcm = tb_priv(tb);
- int i;
-
- tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- if (!list_empty(&group->ports))
- tb_recalc_estimated_bandwidth_for_group(group);
- }
-
- tb_dbg(tb, "bandwidth re-calculation done\n");
-}
-
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
struct tb_port *host_port, *port;
@@ -1737,49 +1821,15 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
return NULL;
}
-static bool tb_tunnel_one_dp(struct tb *tb)
+static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
+ struct tb_port *out)
{
int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb);
- struct tb_port *port, *in, *out;
int consumed_up, consumed_down;
struct tb_tunnel *tunnel;
/*
- * Find pair of inactive DP IN and DP OUT adapters and then
- * establish a DP tunnel between them.
- */
- tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
-
- in = NULL;
- out = NULL;
- list_for_each_entry(port, &tcm->dp_resources, list) {
- if (!tb_port_is_dpin(port))
- continue;
-
- if (tb_port_is_enabled(port)) {
- tb_port_dbg(port, "DP IN in use\n");
- continue;
- }
-
- in = port;
- tb_port_dbg(in, "DP IN available\n");
-
- out = tb_find_dp_out(tb, port);
- if (out)
- break;
- }
-
- if (!in) {
- tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
- return false;
- }
- if (!out) {
- tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
- return false;
- }
-
- /*
* This is only applicable to links that are not bonded (so
* when Thunderbolt 1 hardware is involved somewhere in the
* topology). For these try to share the DP bandwidth between
@@ -1839,15 +1889,19 @@ static bool tb_tunnel_one_dp(struct tb *tb)
goto err_free;
}
+ /* If reading the tunnel's consumed bandwidth fails, tear it down */
+ ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
+ if (ret)
+ goto err_deactivate;
+
list_add_tail(&tunnel->list, &tcm->tunnel_list);
- tb_reclaim_usb3_bandwidth(tb, in, out);
+ tb_reclaim_usb3_bandwidth(tb, in, out);
/*
* Transition the links to asymmetric if the consumption exceeds
* the threshold.
*/
- if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
- tb_configure_asym(tb, in, out, consumed_up, consumed_down);
+ tb_configure_asym(tb, in, out, consumed_up, consumed_down);
/* Update the domain with the new bandwidth estimation */
tb_recalc_estimated_bandwidth(tb);
@@ -1859,6 +1913,8 @@ static bool tb_tunnel_one_dp(struct tb *tb)
tb_increase_tmu_accuracy(tunnel);
return true;
+err_deactivate:
+ tb_tunnel_deactivate(tunnel);
err_free:
tb_tunnel_free(tunnel);
err_reclaim_usb:
@@ -1878,13 +1934,86 @@ err_rpm_put:
static void tb_tunnel_dp(struct tb *tb)
{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *port, *in, *out;
+
if (!tb_acpi_may_tunnel_dp()) {
tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
return;
}
- while (tb_tunnel_one_dp(tb))
- ;
+ /*
+ * Find pair of inactive DP IN and DP OUT adapters and then
+ * establish a DP tunnel between them.
+ */
+ tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+
+ in = NULL;
+ out = NULL;
+ list_for_each_entry(port, &tcm->dp_resources, list) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (tb_port_is_enabled(port)) {
+ tb_port_dbg(port, "DP IN in use\n");
+ continue;
+ }
+
+ in = port;
+ tb_port_dbg(in, "DP IN available\n");
+
+ out = tb_find_dp_out(tb, port);
+ if (out)
+ tb_tunnel_one_dp(tb, in, out);
+ else
+ tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
+ }
+
+ if (!in)
+ tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+}
+
+static void tb_enter_redrive(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ /*
+ * If we get hot-unplug for the DP IN port of the host router
+ * and the DP resource is not available anymore it means there
+ * is a monitor connected directly to the Type-C port and we are
+ * in "redrive" mode. For this to work we cannot enter RTD3 so
+ * we bump up the runtime PM reference count here.
+ */
+ if (!tb_port_is_dpin(port))
+ return;
+ if (tb_route(sw))
+ return;
+ if (!tb_switch_query_dp_resource(sw, port)) {
+ port->redrive = true;
+ pm_runtime_get(&sw->dev);
+ tb_port_dbg(port, "enter redrive mode, keeping powered\n");
+ }
+}
+
+static void tb_exit_redrive(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ if (!tb_port_is_dpin(port))
+ return;
+ if (tb_route(sw))
+ return;
+ if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
+ port->redrive = false;
+ pm_runtime_put(&sw->dev);
+ tb_port_dbg(port, "exit redrive mode\n");
+ }
}
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
@@ -1903,7 +2032,10 @@ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
}
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
- tb_deactivate_and_free_tunnel(tunnel);
+ if (tunnel)
+ tb_deactivate_and_free_tunnel(tunnel);
+ else
+ tb_enter_redrive(port);
list_del_init(&port->list);
/*
@@ -1930,6 +2062,7 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
tb_port_dbg(port, "DP %s resource available after hotplug\n",
tb_port_is_dpin(port) ? "IN" : "OUT");
list_add_tail(&port->list, &tcm->dp_resources);
+ tb_exit_redrive(port);
/* Look for suitable DP IN <-> DP OUT pairs now */
tb_tunnel_dp(tb);
@@ -2243,8 +2376,10 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
int allocated_up, allocated_down, available_up, available_down, ret;
int requested_up_corrected, requested_down_corrected, granularity;
int max_up, max_down, max_up_rounded, max_down_rounded;
+ struct tb_bandwidth_group *group;
struct tb *tb = tunnel->tb;
struct tb_port *in, *out;
+ bool downstream;
ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
if (ret)
@@ -2270,11 +2405,11 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
*/
ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
if (ret)
- return ret;
+ goto fail;
ret = usb4_dp_port_granularity(in);
if (ret < 0)
- return ret;
+ goto fail;
granularity = ret;
max_up_rounded = roundup(max_up, granularity);
@@ -2304,24 +2439,48 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
"bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
requested_up_corrected, requested_down_corrected,
max_up_rounded, max_down_rounded);
- return -ENOBUFS;
+ ret = -ENOBUFS;
+ goto fail;
}
+ downstream = tb_tunnel_direction_downstream(tunnel);
+ group = in->group;
+
if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
(*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
- /*
- * If bandwidth on a link is < asym_threshold transition
- * the link to symmetric.
- */
- tb_configure_sym(tb, in, out, *requested_up, *requested_down, true);
- /*
- * If requested bandwidth is less or equal than what is
- * currently allocated to that tunnel we simply change
- * the reservation of the tunnel. Since all the tunnels
- * going out from the same USB4 port are in the same
- * group the released bandwidth will be taken into
- * account for the other tunnels automatically below.
- */
+ if (tunnel->bw_mode) {
+ int reserved;
+ /*
+ * If the requested bandwidth is less than or equal to
+ * what is currently allocated to the tunnel, we simply
+ * change the reservation of the tunnel and keep the
+ * released bandwidth reserved for the group for the
+ * next 10s. After that it is released for the others
+ * to use.
+ */
+ if (downstream)
+ reserved = allocated_down - *requested_down;
+ else
+ reserved = allocated_up - *requested_up;
+
+ if (reserved > 0) {
+ group->reserved += reserved;
+ tb_dbg(tb, "group %d reserved %d total %d Mb/s\n",
+ group->index, reserved, group->reserved);
+
+ /*
+ * If the release was not already pending, schedule
+ * it now. If it is pending, postpone it for another
+ * 10s (unless the worker is already running, in
+ * which case the 10s have expired and we should
+ * give the reserved bandwidth back to others).
+ */
+ mod_delayed_work(system_wq, &group->release_work,
+ msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
+ }
+ }
+
return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
}
@@ -2332,7 +2491,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
*/
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
if (ret)
- return ret;
+ goto fail;
/*
* Then go over all tunnels that cross the same USB4 ports (they
@@ -2344,11 +2503,15 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
if (ret)
goto reclaim;
- tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n",
- available_up, available_down);
+ tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n",
+ available_up, available_down, group->reserved);
+
+ if ((*requested_up >= 0 &&
+ available_up + group->reserved >= requested_up_corrected) ||
+ (*requested_down >= 0 &&
+ available_down + group->reserved >= requested_down_corrected)) {
+ int released = 0;
- if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
- (*requested_down >= 0 && available_down >= requested_down_corrected)) {
/*
* If bandwidth on a link is >= asym_threshold
* transition the link to asymmetric.
@@ -2356,15 +2519,28 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
ret = tb_configure_asym(tb, in, out, *requested_up,
*requested_down);
if (ret) {
- tb_configure_sym(tb, in, out, 0, 0, true);
- return ret;
+ tb_configure_sym(tb, in, out, true);
+ goto fail;
}
ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
if (ret) {
tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
- tb_configure_sym(tb, in, out, 0, 0, true);
+ tb_configure_sym(tb, in, out, true);
+ }
+
+ if (downstream) {
+ if (*requested_down > available_down)
+ released = *requested_down - available_down;
+ } else {
+ if (*requested_up > available_up)
+ released = *requested_up - available_up;
+ }
+ if (released) {
+ group->reserved -= released;
+ tb_dbg(tb, "group %d released %d total %d Mb/s\n",
+ group->index, released, group->reserved);
}
} else {
ret = -ENOBUFS;
@@ -2372,6 +2548,18 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
reclaim:
tb_reclaim_usb3_bandwidth(tb, in, out);
+fail:
+ if (ret && ret != -ENODEV) {
+ /*
+ * Write back the same allocated bandwidth (so no change);
+ * this makes the DPTX request fail on the graphics side.
+ */
+ tb_tunnel_dbg(tunnel,
+ "failing the request by rewriting allocated %d/%d Mb/s\n",
+ allocated_up, allocated_down);
+ tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
+ }
+
return ret;
}
@@ -2379,11 +2567,11 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
int requested_bw, requested_up, requested_down, ret;
- struct tb_port *in, *out;
struct tb_tunnel *tunnel;
struct tb *tb = ev->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
+ struct tb_port *in;
pm_runtime_get_sync(&tb->dev);
@@ -2406,32 +2594,48 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tb_port_dbg(in, "handling bandwidth allocation request\n");
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (!tunnel) {
+ tb_port_warn(in, "failed to find tunnel\n");
+ goto put_sw;
+ }
+
if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
- tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+ if (tunnel->bw_mode) {
+ /*
+ * Reset the tunnel back to use the legacy
+ * allocation.
+ */
+ tunnel->bw_mode = false;
+ tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n");
+ } else {
+ tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+ }
goto put_sw;
}
ret = usb4_dp_port_requested_bandwidth(in);
if (ret < 0) {
- if (ret == -ENODATA)
- tb_port_dbg(in, "no bandwidth request active\n");
- else
+ if (ret == -ENODATA) {
+ /*
+ * There is no request active so this means the
+ * BW allocation mode was enabled from the graphics
+ * side. At this point we know that the graphics
+ * driver has read the DPRX capabilities so we
+ * can offer a better bandwidth estimation.
+ */
+ tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n");
+ tb_recalc_estimated_bandwidth(tb);
+ } else {
tb_port_warn(in, "failed to read requested bandwidth\n");
+ }
goto put_sw;
}
requested_bw = ret;
tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
- tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
- if (!tunnel) {
- tb_port_warn(in, "failed to find tunnel\n");
- goto put_sw;
- }
-
- out = tunnel->dst_port;
-
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
requested_up = -1;
requested_down = requested_bw;
} else {
@@ -2560,6 +2764,16 @@ static void tb_stop(struct tb *tb)
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
+static void tb_deinit(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int i;
+
+ /* Cancel all the release bandwidth workers */
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++)
+ cancel_delayed_work_sync(&tcm->groups[i].release_work);
+}
+
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
if (tb_is_switch(dev)) {
@@ -2581,9 +2795,10 @@ static int tb_scan_finalize_switch(struct device *dev, void *data)
return 0;
}
-static int tb_start(struct tb *tb)
+static int tb_start(struct tb *tb, bool reset)
{
struct tb_cm *tcm = tb_priv(tb);
+ bool discover = true;
int ret;
tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
@@ -2622,12 +2837,28 @@ static int tb_start(struct tb *tb)
tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
- /* Full scan to discover devices added before the driver was loaded. */
- tb_scan_switch(tb->root_switch);
- /* Find out tunnels created by the boot firmware */
- tb_discover_tunnels(tb);
- /* Add DP resources from the DP tunnels created by the boot firmware */
- tb_discover_dp_resources(tb);
+
+ /*
+ * Boot firmware might have created tunnels of its own. Since we
+ * cannot be sure they are usable for us, tear them down and
+ * reset the ports to handle it as new hotplug for USB4 v1
+ * routers (for USB4 v2 and beyond we already do host reset).
+ */
+ if (reset && tb_switch_is_usb4(tb->root_switch)) {
+ discover = false;
+ if (usb4_switch_version(tb->root_switch) == 1)
+ tb_switch_reset(tb->root_switch);
+ }
+
+ if (discover) {
+ /* Full scan to discover devices added before the driver was loaded. */
+ tb_scan_switch(tb->root_switch);
+ /* Find out tunnels created by the boot firmware */
+ tb_discover_tunnels(tb);
+ /* Add DP resources from the DP tunnels created by the boot firmware */
+ tb_discover_dp_resources(tb);
+ }
+
/*
* If the boot firmware did not create USB 3.x tunnels create them
* now for the whole topology.
@@ -2698,8 +2929,12 @@ static int tb_resume_noirq(struct tb *tb)
tb_dbg(tb, "resuming...\n");
- /* remove any pci devices the firmware might have setup */
- tb_switch_reset(tb->root_switch);
+ /*
+ * For non-USB4 hosts (Apple systems) remove any PCIe devices
+ * the firmware might have set up.
+ */
+ if (!tb_switch_is_usb4(tb->root_switch))
+ tb_switch_reset(tb->root_switch);
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
@@ -2847,6 +3082,7 @@ static int tb_runtime_resume(struct tb *tb)
static const struct tb_cm_ops tb_cm_ops = {
.start = tb_start,
.stop = tb_stop,
+ .deinit = tb_deinit,
.suspend_noirq = tb_suspend_noirq,
.resume_noirq = tb_resume_noirq,
.freeze_noirq = tb_freeze_noirq,
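The reserved bandwidth accounted above is only held for TB_RELEASE_BW_TIMEOUT (10 s); after that the group's release_work hands it back to the other tunnels in the group. A minimal sketch of what such a worker can look like is shown below; the function name, the use of tb->lock and the call to tb_recalc_estimated_bandwidth() are assumptions for illustration, not the in-tree implementation:

static void tb_bandwidth_group_release_work(struct work_struct *work)
{
	/* Sketch only: the locking and helper choice here are assumptions */
	struct tb_bandwidth_group *group =
		container_of(work, typeof(*group), release_work.work);
	struct tb *tb = group->tb;

	mutex_lock(&tb->lock);
	if (group->reserved) {
		tb_dbg(tb, "group %d releasing %d Mb/s of reserved bandwidth\n",
		       group->index, group->reserved);
		group->reserved = 0;
		/* Hand the freed bandwidth back to the other tunnels */
		tb_recalc_estimated_bandwidth(tb);
	}
	mutex_unlock(&tb->lock);
}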
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 997c5a536905..feed8ecaf712 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -23,6 +23,8 @@
#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
/* Disable CLx if not supported */
#define QUIRK_NO_CLX BIT(1)
+/* Need to keep power on while USB4 port is in redrive mode */
+#define QUIRK_KEEP_POWER_IN_DP_REDRIVE BIT(2)
/**
* struct tb_nvm - Structure holding NVM information
@@ -217,6 +219,11 @@ struct tb_switch {
* @tb: Pointer to the domain the group belongs to
* @index: Index of the group (aka Group_ID). Valid values %1-%7
* @ports: DP IN adapters belonging to this group are linked here
+ * @reserved: Bandwidth released by one tunnel in the group, available
+ * to others. This is reported as part of estimated_bw for
+ * the group.
+ * @release_work: Worker to release the @reserved if it is not used by
+ * any of the tunnels.
*
* Any tunnel that requires isochronous bandwidth (that's DP for now) is
* attached to a bandwidth group. All tunnels going through the same
@@ -227,6 +234,8 @@ struct tb_bandwidth_group {
struct tb *tb;
int index;
struct list_head ports;
+ int reserved;
+ struct delayed_work release_work;
};
/**
@@ -258,6 +267,7 @@ struct tb_bandwidth_group {
* @group_list: The adapter is linked to the group's list of ports through this
* @max_bw: Maximum possible bandwidth through this adapter if set to
* non-zero.
+ * @redrive: For DP IN, if true the adapter is in redrive mode.
*
* In USB4 terminology this structure represents an adapter (protocol or
* lane adapter).
@@ -286,6 +296,7 @@ struct tb_port {
struct tb_bandwidth_group *group;
struct list_head group_list;
unsigned int max_bw;
+ bool redrive;
};
/**
@@ -452,6 +463,8 @@ struct tb_path {
* ICM to send driver ready message to the firmware.
* @start: Starts the domain
* @stop: Stops the domain
+ * @deinit: Perform any cleanup after the domain is stopped but before
+ * it is unregistered. Called without @tb->lock taken. Optional.
* @suspend_noirq: Connection manager specific suspend_noirq
* @resume_noirq: Connection manager specific resume_noirq
* @suspend: Connection manager specific suspend
@@ -483,8 +496,9 @@ struct tb_path {
*/
struct tb_cm_ops {
int (*driver_ready)(struct tb *tb);
- int (*start)(struct tb *tb);
+ int (*start)(struct tb *tb, bool reset);
void (*stop)(struct tb *tb);
+ void (*deinit)(struct tb *tb);
int (*suspend_noirq)(struct tb *tb);
int (*resume_noirq)(struct tb *tb);
int (*suspend)(struct tb *tb);
@@ -735,10 +749,10 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
struct tb *icm_probe(struct tb_nhi *nhi);
struct tb *tb_probe(struct tb_nhi *nhi);
-extern struct device_type tb_domain_type;
-extern struct device_type tb_retimer_type;
-extern struct device_type tb_switch_type;
-extern struct device_type usb4_port_device_type;
+extern const struct device_type tb_domain_type;
+extern const struct device_type tb_retimer_type;
+extern const struct device_type tb_switch_type;
+extern const struct device_type usb4_port_device_type;
int tb_domain_init(void);
void tb_domain_exit(void);
@@ -746,7 +760,7 @@ int tb_xdomain_init(void);
void tb_xdomain_exit(void);
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
-int tb_domain_add(struct tb *tb);
+int tb_domain_add(struct tb *tb, bool reset);
void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
@@ -1150,6 +1164,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index);
bool tb_path_is_invalid(struct tb_path *path);
bool tb_path_port_on_path(const struct tb_path *path,
const struct tb_port *port);
@@ -1169,6 +1184,7 @@ int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
+int tb_lc_reset_port(struct tb_port *port);
int tb_lc_configure_port(struct tb_port *port);
void tb_lc_unconfigure_port(struct tb_port *port);
int tb_lc_configure_xdomain(struct tb_port *port);
@@ -1301,6 +1317,7 @@ void usb4_switch_remove_ports(struct tb_switch *sw);
int usb4_port_unlock(struct tb_port *port);
int usb4_port_hotplug_enable(struct tb_port *port);
+int usb4_port_reset(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 6f798f6a2b84..4e43b47f9f11 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -194,6 +194,8 @@ struct tb_regs_switch_header {
#define USB4_VERSION_MAJOR_MASK GENMASK(7, 5)
#define ROUTER_CS_1 0x01
+#define ROUTER_CS_3 0x03
+#define ROUTER_CS_3_V BIT(31)
#define ROUTER_CS_4 0x04
/* Used with the router cmuv field */
#define ROUTER_CS_4_CMUV_V1 0x10
@@ -389,6 +391,7 @@ struct tb_regs_port_header {
#define PORT_CS_18_CSA BIT(22)
#define PORT_CS_18_TIP BIT(24)
#define PORT_CS_19 0x13
+#define PORT_CS_19_DPR BIT(0)
#define PORT_CS_19_PC BIT(3)
#define PORT_CS_19_PID BIT(4)
#define PORT_CS_19_WOC BIT(16)
@@ -584,6 +587,9 @@ struct tb_regs_hop {
#define TB_LC_POWER 0x740
/* Link controller registers */
+#define TB_LC_PORT_MODE 0x26
+#define TB_LC_PORT_MODE_DPR BIT(0)
+
#define TB_LC_CS_42 0x2a
#define TB_LC_CS_42_USB_PLUGGED BIT(31)
diff --git a/drivers/thunderbolt/trace.h b/drivers/thunderbolt/trace.h
new file mode 100644
index 000000000000..4dccfcf7af6a
--- /dev/null
+++ b/drivers/thunderbolt/trace.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt tracing support
+ *
+ * Copyright (C) 2024, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Gil Fine <gil.fine@intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thunderbolt
+
+#if !defined(TB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define TB_TRACE_H_
+
+#include <linux/trace_seq.h>
+#include <linux/tracepoint.h>
+
+#include "tb_msgs.h"
+
+#define tb_cfg_type_name(type) { type, #type }
+#define show_type_name(val) \
+ __print_symbolic(val, \
+ tb_cfg_type_name(TB_CFG_PKG_READ), \
+ tb_cfg_type_name(TB_CFG_PKG_WRITE), \
+ tb_cfg_type_name(TB_CFG_PKG_ERROR), \
+ tb_cfg_type_name(TB_CFG_PKG_NOTIFY_ACK), \
+ tb_cfg_type_name(TB_CFG_PKG_EVENT), \
+ tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_REQ), \
+ tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_RESP), \
+ tb_cfg_type_name(TB_CFG_PKG_OVERRIDE), \
+ tb_cfg_type_name(TB_CFG_PKG_RESET), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_EVENT), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_CMD), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_RESP))
+
+#ifndef TB_TRACE_HELPERS
+#define TB_TRACE_HELPERS
+static inline const char *show_data_read_write(struct trace_seq *p,
+ const u32 *data)
+{
+ const struct cfg_read_pkg *msg = (const struct cfg_read_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "offset=%#x, len=%u, port=%d, config=%#x, seq=%d, ",
+ msg->addr.offset, msg->addr.length, msg->addr.port,
+ msg->addr.space, msg->addr.seq);
+
+ return ret;
+}
+
+static inline const char *show_data_error(struct trace_seq *p, const u32 *data)
+{
+ const struct cfg_error_pkg *msg = (const struct cfg_error_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "error=%#x, port=%d, plug=%#x, ", msg->error,
+ msg->port, msg->pg);
+
+ return ret;
+}
+
+static inline const char *show_data_event(struct trace_seq *p, const u32 *data)
+{
+ const struct cfg_event_pkg *msg = (const struct cfg_event_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "port=%d, unplug=%#x, ", msg->port, msg->unplug);
+
+ return ret;
+}
+
+static inline const char *show_route(struct trace_seq *p, const u32 *data)
+{
+ const struct tb_cfg_header *header = (const struct tb_cfg_header *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "route=%llx, ", tb_cfg_get_route(header));
+
+ return ret;
+}
+
+static inline const char *show_data(struct trace_seq *p, u8 type,
+ const u32 *data, u32 length)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ const char *prefix = "";
+ int i;
+
+ show_route(p, data);
+
+ switch (type) {
+ case TB_CFG_PKG_READ:
+ case TB_CFG_PKG_WRITE:
+ show_data_read_write(p, data);
+ break;
+
+ case TB_CFG_PKG_ERROR:
+ show_data_error(p, data);
+ break;
+
+ case TB_CFG_PKG_EVENT:
+ show_data_event(p, data);
+ break;
+
+ default:
+ break;
+ }
+
+ trace_seq_printf(p, "data=[");
+ for (i = 0; i < length; i++) {
+ trace_seq_printf(p, "%s0x%08x", prefix, data[i]);
+ prefix = ", ";
+ }
+ trace_seq_printf(p, "]");
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+#endif
+
+DECLARE_EVENT_CLASS(tb_raw,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size),
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(u8, type)
+ __field(size_t, size)
+ __dynamic_array(u32, data, size / 4)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->type = type;
+ __entry->size = size / 4;
+ memcpy(__get_dynamic_array(data), data, size);
+ ),
+ TP_printk("type=%s, size=%zd, domain=%d, %s",
+ show_type_name(__entry->type), __entry->size, __entry->index,
+ show_data(p, __entry->type, __get_dynamic_array(data),
+ __entry->size)
+ )
+);
+
+DEFINE_EVENT(tb_raw, tb_tx,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size)
+);
+
+DEFINE_EVENT(tb_raw, tb_event,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size)
+);
+
+TRACE_EVENT(tb_rx,
+ TP_PROTO(int index, u8 type, const void *data, size_t size, bool dropped),
+ TP_ARGS(index, type, data, size, dropped),
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(u8, type)
+ __field(size_t, size)
+ __dynamic_array(u32, data, size / 4)
+ __field(bool, dropped)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->type = type;
+ __entry->size = size / 4;
+ memcpy(__get_dynamic_array(data), data, size);
+ __entry->dropped = dropped;
+ ),
+ TP_printk("type=%s, dropped=%u, size=%zd, domain=%d, %s",
+ show_type_name(__entry->type), __entry->dropped,
+ __entry->size, __entry->index,
+ show_data(p, __entry->type, __get_dynamic_array(data),
+ __entry->size)
+ )
+);
+
+#endif /* TB_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
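Exactly one compilation unit in the driver has to define CREATE_TRACE_POINTS before including this header so the event bodies are emitted. A hedged sketch of how a transmit path could then record a packet follows; the wrapper function and the way the domain index and packet type reach it are illustrative assumptions, not the actual ctl.c call sites:

#define CREATE_TRACE_POINTS
#include "trace.h"

/* Illustrative only: emit the TX event before posting the frame */
static void example_ctl_tx(int domain_index, u8 type, const void *pkg,
			   size_t size)
{
	trace_tb_tx(domain_index, type, pkg, size);
	/* ... hand the frame to the NHI transmit ring ... */
}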
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 6fffb2c82d3d..cb6609a56a03 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -706,7 +706,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
"DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
out_rate, out_lanes, bw);
- if (tb_port_path_direction_downstream(in, out))
+ if (tb_tunnel_direction_downstream(tunnel))
max_bw = tunnel->max_down;
else
max_bw = tunnel->max_up;
@@ -831,7 +831,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
* max_up/down fields. For discovery we just read what the
* estimation was set to.
*/
- if (tb_port_path_direction_downstream(in, out))
+ if (tb_tunnel_direction_downstream(tunnel))
estimated_bw = tunnel->max_down;
else
estimated_bw = tunnel->max_up;
@@ -926,12 +926,18 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
return 0;
}
-/* max_bw is rounded up to next granularity */
+/**
+ * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
+ * @tunnel: DP tunnel to check
+ * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
+ *
+ * Returns maximum possible bandwidth for this tunnel in Mb/s.
+ */
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
- int *max_bw)
+ int *max_bw_rounded)
{
struct tb_port *in = tunnel->src_port;
- int ret, rate, lanes, nrd_bw;
+ int ret, rate, lanes, max_bw;
u32 cap;
/*
@@ -947,41 +953,26 @@ static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
return ret;
rate = tb_dp_cap_get_rate_ext(cap);
- if (tb_dp_is_uhbr_rate(rate)) {
- /*
- * When UHBR is used there is no reduction in lanes so
- * we can use this directly.
- */
- lanes = tb_dp_cap_get_lanes(cap);
- } else {
- /*
- * If there is no UHBR supported then check the
- * non-reduced rate and lanes.
- */
- ret = usb4_dp_port_nrd(in, &rate, &lanes);
- if (ret)
- return ret;
- }
+ lanes = tb_dp_cap_get_lanes(cap);
- nrd_bw = tb_dp_bandwidth(rate, lanes);
+ max_bw = tb_dp_bandwidth(rate, lanes);
- if (max_bw) {
+ if (max_bw_rounded) {
ret = usb4_dp_port_granularity(in);
if (ret < 0)
return ret;
- *max_bw = roundup(nrd_bw, ret);
+ *max_bw_rounded = roundup(max_bw, ret);
}
- return nrd_bw;
+ return max_bw;
}
static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up,
int *consumed_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- int ret, allocated_bw, max_bw;
+ int ret, allocated_bw, max_bw_rounded;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
@@ -995,13 +986,13 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
return ret;
allocated_bw = ret;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
if (ret < 0)
return ret;
- if (allocated_bw == max_bw)
+ if (allocated_bw == max_bw_rounded)
allocated_bw = ret;
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*consumed_up = 0;
*consumed_down = allocated_bw;
} else {
@@ -1015,7 +1006,6 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
/*
@@ -1023,20 +1013,21 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
* Otherwise we read it from the DPRX.
*/
if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
- int ret, allocated_bw, max_bw;
+ int ret, allocated_bw, max_bw_rounded;
ret = usb4_dp_port_allocated_bandwidth(in);
if (ret < 0)
return ret;
allocated_bw = ret;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
+ &max_bw_rounded);
if (ret < 0)
return ret;
- if (allocated_bw == max_bw)
+ if (allocated_bw == max_bw_rounded)
allocated_bw = ret;
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*allocated_up = 0;
*allocated_down = allocated_bw;
} else {
@@ -1053,26 +1044,25 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- int max_bw, ret, tmp;
+ int max_bw_rounded, ret, tmp;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
if (ret < 0)
return ret;
- if (tb_port_path_direction_downstream(in, out)) {
- tmp = min(*alloc_down, max_bw);
+ if (tb_tunnel_direction_downstream(tunnel)) {
+ tmp = min(*alloc_down, max_bw_rounded);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
*alloc_down = tmp;
*alloc_up = 0;
} else {
- tmp = min(*alloc_up, max_bw);
+ tmp = min(*alloc_up, max_bw_rounded);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
@@ -1150,17 +1140,16 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
- struct tb_port *in = tunnel->src_port;
int ret;
- if (!usb4_dp_port_bandwidth_mode_enabled(in))
+ if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
return -EOPNOTSUPP;
ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
if (ret < 0)
return ret;
- if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*max_up = 0;
*max_down = ret;
} else {
@@ -1174,8 +1163,7 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down)
{
- struct tb_port *in = tunnel->src_port;
- const struct tb_switch *sw = in->sw;
+ const struct tb_switch *sw = tunnel->src_port->sw;
u32 rate = 0, lanes = 0;
int ret;
@@ -1196,17 +1184,13 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
/*
* Then see if the DPRX negotiation is ready and if yes
* return that bandwidth (it may be smaller than the
- * reduced one). Otherwise return the remote (possibly
- * reduced) caps.
+ * reduced one). According to the VESA spec, the DPRX
+ * negotiation shall complete within 5 seconds after the
+ * tunnel is established. We give it 100ms extra just in case.
*/
- ret = tb_dp_wait_dprx(tunnel, 150);
- if (ret) {
- if (ret == -ETIMEDOUT)
- ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
- &rate, &lanes);
- if (ret)
- return ret;
- }
+ ret = tb_dp_wait_dprx(tunnel, 5100);
+ if (ret)
+ return ret;
ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
if (ret)
return ret;
@@ -1221,7 +1205,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
return 0;
}
- if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*consumed_up = 0;
*consumed_down = tb_dp_bandwidth(rate, lanes);
} else {
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index b4cff5482112..1a27ccd08b86 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -139,6 +139,12 @@ static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
return tunnel->type == TB_TUNNEL_USB3;
}
+static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel)
+{
+ return tb_port_path_direction_downstream(tunnel->src_port,
+ tunnel->dst_port);
+}
+
const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 1515eff8cc3e..9860b49d7a2b 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -1113,6 +1113,45 @@ int usb4_port_hotplug_enable(struct tb_port *port)
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}
+/**
+ * usb4_port_reset() - Issue downstream port reset
+ * @port: USB4 port to reset
+ *
+ * Issues downstream port reset to @port.
+ */
+int usb4_port_reset(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ if (!port->cap_usb4)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val |= PORT_CS_19_DPR;
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ fsleep(10000);
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val &= ~PORT_CS_19_DPR;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+}
+
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
int ret;
@@ -2819,8 +2858,10 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
usleep_range(50, 100);
} while (ktime_before(ktime_get(), end));
- if (val & ADP_DP_CS_8_DR)
+ if (val & ADP_DP_CS_8_DR) {
+ tb_port_warn(port, "timeout waiting for DPTX request to clear\n");
return -ETIMEDOUT;
+ }
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c
index e355bfd6343f..5150879888ca 100644
--- a/drivers/thunderbolt/usb4_port.c
+++ b/drivers/thunderbolt/usb4_port.c
@@ -243,7 +243,7 @@ static void usb4_port_device_release(struct device *dev)
kfree(usb4);
}
-struct device_type usb4_port_device_type = {
+const struct device_type usb4_port_device_type = {
.name = "usb4_port",
.groups = usb4_port_device_groups,
.release = usb4_port_device_release,
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 9495742913d5..940ae97987ff 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -997,12 +997,12 @@ static void tb_service_release(struct device *dev)
struct tb_xdomain *xd = tb_service_parent(svc);
tb_service_debugfs_remove(svc);
- ida_simple_remove(&xd->service_ids, svc->id);
+ ida_free(&xd->service_ids, svc->id);
kfree(svc->key);
kfree(svc);
}
-struct device_type tb_service_type = {
+const struct device_type tb_service_type = {
.name = "thunderbolt_service",
.groups = tb_service_attr_groups,
.uevent = tb_service_uevent,
@@ -1099,7 +1099,7 @@ static void enumerate_services(struct tb_xdomain *xd)
break;
}
- id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&xd->service_ids, GFP_KERNEL);
if (id < 0) {
kfree(svc->key);
kfree(svc);
@@ -1791,13 +1791,13 @@ static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
- case TB_LINK_WIDTH_ASYM_RX:
+ case TB_LINK_WIDTH_ASYM_TX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
- case TB_LINK_WIDTH_ASYM_TX:
+ case TB_LINK_WIDTH_ASYM_RX:
width = 3;
break;
default:
@@ -1817,13 +1817,13 @@ static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
- case TB_LINK_WIDTH_ASYM_TX:
+ case TB_LINK_WIDTH_ASYM_RX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
- case TB_LINK_WIDTH_ASYM_RX:
+ case TB_LINK_WIDTH_ASYM_TX:
width = 3;
break;
default:
@@ -1893,7 +1893,7 @@ static const struct dev_pm_ops tb_xdomain_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};
-struct device_type tb_xdomain_type = {
+const struct device_type tb_xdomain_type = {
.name = "thunderbolt_xdomain",
.release = tb_xdomain_release,
.pm = &tb_xdomain_pm_ops,
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index ee917f1b091c..8b936a2e93a0 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -435,7 +435,7 @@ int cdns_drd_init(struct cdns *cdns)
writel(1, &cdns->otg_v1_regs->simulate);
cdns->version = CDNS3_CONTROLLER_V1;
} else {
- dev_err(cdns->dev, "not supporte DID=0x%08x\n", state);
+ dev_err(cdns->dev, "not supported DID=0x%08x\n", state);
return -EINVAL;
}
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 351ede4b5de2..58e3ca7e4793 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -116,3 +116,30 @@ config USB_AUTOSUSPEND_DELAY
The default value Linux has always had is 2 seconds. Change
this value if you want a different delay and cannot modify
the command line or module parameter.
+
+config USB_DEFAULT_AUTHORIZATION_MODE
+ int "Default authorization mode for USB devices"
+ range 0 2
+ default 1
+ depends on USB
+ help
+ Select the default USB device authorization mode. Can be overridden
+ with the usbcore.authorized_default command line or module parameter.
+
+ This option allows you to choose whether USB devices that are
+ connected to the system can be used by default, or if they are
+ locked down.
+
+ With value 0 all connected USB devices, with the exception of the
+ root hub, require user space authorization before they can be used.
+
+ With value 1 (default) no user space authorization is required to
+ use connected USB devices.
+
+ With value 2 all connected USB devices, with the exception of internal
+ USB devices, require user space authorization before they can be
+ used. Note that in this mode the differentiation between internal
+ and external USB devices relies on ACPI, and on systems without
+ ACPI selecting value 2 is analogous to selecting value 0.
+
+ If unsure, keep the default value.
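For reference, the build-time default can still be overridden at boot or module load with the parameter named in the help text above; the values mirror the Kconfig choices (standard module-parameter syntax assumed, shown only as an illustration):

    usbcore.authorized_default=2            # kernel command line
    options usbcore authorized_default=2    # /etc/modprobe.d/*.conf if usbcore is modular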
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e01b1913d02b..e02ba15f6e34 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1710,9 +1710,7 @@ int usb_autoresume_device(struct usb_device *udev)
{
int status;
- status = pm_runtime_get_sync(&udev->dev);
- if (status < 0)
- pm_runtime_put_sync(&udev->dev);
+ status = pm_runtime_resume_and_get(&udev->dev);
dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&udev->dev.power.usage_count),
status);
@@ -1818,9 +1816,7 @@ int usb_autopm_get_interface(struct usb_interface *intf)
{
int status;
- status = pm_runtime_get_sync(&intf->dev);
- if (status < 0)
- pm_runtime_put_sync(&intf->dev);
+ status = pm_runtime_resume_and_get(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index a2530811cf7d..4b38b87a1343 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -141,7 +141,7 @@ static void ep_device_release(struct device *dev)
kfree(ep_dev);
}
-struct device_type usb_ep_device_type = {
+const struct device_type usb_ep_device_type = {
.name = "usb_endpoint",
.release = ep_device_release,
};
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index edf74458474a..c0e005670d67 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -357,12 +357,10 @@ static const u8 ss_rh_config_descriptor[] = {
#define USB_AUTHORIZE_ALL 1
#define USB_AUTHORIZE_INTERNAL 2
-static int authorized_default = USB_AUTHORIZE_WIRED;
+static int authorized_default = CONFIG_USB_DEFAULT_AUTHORIZATION_MODE;
module_param(authorized_default, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(authorized_default,
- "Default USB device authorization: 0 is not authorized, 1 is "
- "authorized, 2 is authorized for internal devices, -1 is "
- "authorized (default, same as 1)");
+ "Default USB device authorization: 0 is not authorized, 1 is authorized (default), 2 is authorized for internal devices, -1 is authorized (same as 1)");
/*-------------------------------------------------------------------------*/
/**
@@ -2795,10 +2793,16 @@ int usb_add_hcd(struct usb_hcd *hcd,
struct usb_device *rhdev;
struct usb_hcd *shared_hcd;
- if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
- hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
- if (IS_ERR(hcd->phy_roothub))
- return PTR_ERR(hcd->phy_roothub);
+ if (!hcd->skip_phy_initialization) {
+ if (usb_hcd_is_primary_hcd(hcd)) {
+ hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
+ if (IS_ERR(hcd->phy_roothub))
+ return PTR_ERR(hcd->phy_roothub);
+ } else {
+ hcd->phy_roothub = usb_phy_roothub_alloc_usb3_phy(hcd->self.sysdev);
+ if (IS_ERR(hcd->phy_roothub))
+ return PTR_ERR(hcd->phy_roothub);
+ }
retval = usb_phy_roothub_init(hcd->phy_roothub);
if (retval)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index e38a4124f610..3ee8455585b6 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -37,6 +37,7 @@
#include <asm/byteorder.h>
#include "hub.h"
+#include "phy.h"
#include "otg_productlist.h"
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
@@ -634,6 +635,34 @@ static int hub_ext_port_status(struct usb_hub *hub, int port1, int type,
ret = 0;
}
mutex_unlock(&hub->status_mutex);
+
+ /*
+ * There is no need to lock status_mutex here, because status_mutex
+ * protects hub->status, and the phy driver only checks the port
+ * status without changing the status.
+ */
+ if (!ret) {
+ struct usb_device *hdev = hub->hdev;
+
+ /*
+ * Only the roothub will be notified of connection changes,
+ * since the USB PHY only cares about changes at the next
+ * level.
+ */
+ if (is_root_hub(hdev)) {
+ struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
+ bool connect;
+ bool connect_change;
+
+ connect_change = *change & USB_PORT_STAT_C_CONNECTION;
+ connect = *status & USB_PORT_STAT_CONNECTION;
+ if (connect_change && connect)
+ usb_phy_roothub_notify_connect(hcd->phy_roothub, port1 - 1);
+ else if (connect_change)
+ usb_phy_roothub_notify_disconnect(hcd->phy_roothub, port1 - 1);
+ }
+ }
+
return ret;
}
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 077dfe48d01c..d2b2787be409 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1198,6 +1198,8 @@ EXPORT_SYMBOL_GPL(usb_get_status);
* same status code used to report a true stall.
*
* This call is synchronous, and may not be used in an interrupt context.
+ * If a thread in your driver uses this call, make sure your disconnect()
+ * method can wait for it to complete.
*
* Return: Zero on success, or else the status code returned by the
* underlying usb_control_msg() call.
@@ -1516,7 +1518,8 @@ void usb_enable_interface(struct usb_device *dev,
* This call is synchronous, and may not be used in an interrupt context.
* Also, drivers must not change altsettings while urbs are scheduled for
* endpoints in that interface; all such urbs must first be completed
- * (perhaps forced by unlinking).
+ * (perhaps forced by unlinking). If a thread in your driver uses this call,
+ * make sure your disconnect() method can wait for it to complete.
*
* Return: Zero on success, or else the status code returned by the
* underlying usb_control_msg() call.
@@ -1849,7 +1852,7 @@ static int usb_if_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-struct device_type usb_if_device_type = {
+const struct device_type usb_if_device_type = {
.name = "usb_interface",
.release = usb_release_interface,
.uevent = usb_if_uevent,
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index db4ccf9ce3d9..f1a499ee482c 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -8,6 +8,7 @@
*/
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/usb/of.h>
/**
@@ -75,6 +76,76 @@ bool usb_of_has_combined_node(struct usb_device *udev)
}
EXPORT_SYMBOL_GPL(usb_of_has_combined_node);
+static bool usb_of_has_devices_or_graph(const struct usb_device *hub)
+{
+ const struct device_node *np = hub->dev.of_node;
+ struct device_node *child;
+
+ if (of_graph_is_present(np))
+ return true;
+
+ for_each_child_of_node(np, child)
+ if (of_property_present(child, "reg"))
+ return true;
+
+ return false;
+}
+
+/**
+ * usb_of_get_connect_type() - get a USB hub's port connect_type
+ * @hub: hub to which port is for @port1
+ * @port1: one-based index of port
+ *
+ * Get the connect_type of @port1 based on the device node for @hub. If the
+ * port is described in the OF graph, the connect_type is "hotplug". If the
+ * @hub has a child device with a 'reg' property equal to @port1 the
+ * connect_type is "hard-wired". If there isn't an OF graph or child node at
+ * all then the connect_type is "unknown". Otherwise, the port is considered
+ * "unused" because it isn't described at all.
+ *
+ * Return: A connect_type for @port1 based on the device node for @hub.
+ */
+enum usb_port_connect_type usb_of_get_connect_type(struct usb_device *hub, int port1)
+{
+ struct device_node *np, *child, *ep, *remote_np;
+ enum usb_port_connect_type connect_type;
+
+ /* Only set connect_type if binding has ports/hardwired devices. */
+ if (!usb_of_has_devices_or_graph(hub))
+ return USB_PORT_CONNECT_TYPE_UNKNOWN;
+
+ /* Assume port is unused if there's a graph or a child node. */
+ connect_type = USB_PORT_NOT_USED;
+
+ np = hub->dev.of_node;
+ /*
+ * Hotplug ports are connected to an available remote node, e.g.
+ * usb-a-connector compatible node, in the OF graph.
+ */
+ if (of_graph_is_present(np)) {
+ ep = of_graph_get_endpoint_by_regs(np, port1, -1);
+ if (ep) {
+ remote_np = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (of_device_is_available(remote_np))
+ connect_type = USB_PORT_CONNECT_TYPE_HOT_PLUG;
+ of_node_put(remote_np);
+ }
+ }
+
+ /*
+ * Hard-wired ports are child nodes with a reg property corresponding
+ * to the port number, i.e. a usb device.
+ */
+ child = usb_of_get_device_node(hub, port1);
+ if (of_device_is_available(child))
+ connect_type = USB_PORT_CONNECT_TYPE_HARD_WIRED;
+ of_node_put(child);
+
+ return connect_type;
+}
+EXPORT_SYMBOL_GPL(usb_of_get_connect_type);
+
/**
* usb_of_get_interface_node() - get a USB interface node
* @udev: USB device of interface
diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c
index fb1588e7c282..faa20054ad5a 100644
--- a/drivers/usb/core/phy.c
+++ b/drivers/usb/core/phy.c
@@ -19,6 +19,30 @@ struct usb_phy_roothub {
struct list_head list;
};
+/* Allocate a roothub_entry for the phy with the given name */
+static int usb_phy_roothub_add_phy_by_name(struct device *dev, const char *name,
+ struct list_head *list)
+{
+ struct usb_phy_roothub *roothub_entry;
+ struct phy *phy;
+
+ phy = devm_of_phy_get(dev, dev->of_node, name);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL);
+ if (!roothub_entry)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&roothub_entry->list);
+
+ roothub_entry->phy = phy;
+
+ list_add_tail(&roothub_entry->list, list);
+
+ return 0;
+}
+
static int usb_phy_roothub_add_phy(struct device *dev, int index,
struct list_head *list)
{
@@ -65,6 +89,9 @@ struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
INIT_LIST_HEAD(&phy_roothub->list);
+ if (!usb_phy_roothub_add_phy_by_name(dev, "usb2-phy", &phy_roothub->list))
+ return phy_roothub;
+
for (i = 0; i < num_phys; i++) {
err = usb_phy_roothub_add_phy(dev, i, &phy_roothub->list);
if (err)
@@ -75,6 +102,41 @@ struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc);
+/**
+ * usb_phy_roothub_alloc_usb3_phy - alloc the roothub
+ * @dev: the device of the host controller
+ *
+ * Allocate the usb phy roothub if the host uses a generic usb3-phy.
+ *
+ * Return: On success, a pointer to the usb_phy_roothub. Otherwise,
+ * %NULL if no usb3 phy is used, or %-ENOMEM if out of memory.
+ */
+struct usb_phy_roothub *usb_phy_roothub_alloc_usb3_phy(struct device *dev)
+{
+ struct usb_phy_roothub *phy_roothub;
+ int num_phys;
+
+ if (!IS_ENABLED(CONFIG_GENERIC_PHY))
+ return NULL;
+
+ num_phys = of_count_phandle_with_args(dev->of_node, "phys",
+ "#phy-cells");
+ if (num_phys <= 0)
+ return NULL;
+
+ phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL);
+ if (!phy_roothub)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&phy_roothub->list);
+
+ if (!usb_phy_roothub_add_phy_by_name(dev, "usb3-phy", &phy_roothub->list))
+ return phy_roothub;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc_usb3_phy);
+
int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub)
{
struct usb_phy_roothub *roothub_entry;
@@ -172,6 +234,64 @@ int usb_phy_roothub_calibrate(struct usb_phy_roothub *phy_roothub)
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_calibrate);
+/**
+ * usb_phy_roothub_notify_connect() - connect notification
+ * @phy_roothub: the phy of the roothub, if the host uses a generic phy.
+ * @port: the port index for connect
+ *
+ * If the phy needs to know the connection status, this callback can be used.
+ * Returns: %0 if successful, a negative error code otherwise
+ */
+int usb_phy_roothub_notify_connect(struct usb_phy_roothub *phy_roothub, int port)
+{
+ struct usb_phy_roothub *roothub_entry;
+ struct list_head *head;
+ int err;
+
+ if (!phy_roothub)
+ return 0;
+
+ head = &phy_roothub->list;
+
+ list_for_each_entry(roothub_entry, head, list) {
+ err = phy_notify_connect(roothub_entry->phy, port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_notify_connect);
+
+/**
+ * usb_phy_roothub_notify_disconnect() - disconnect notification
+ * @phy_roothub: the phy of the roothub, if the host uses a generic phy.
+ * @port: the port index for disconnect
+ *
+ * If the phy needs to know the connection status, this callback can be used.
+ * Returns: %0 if successful, a negative error code otherwise
+ */
+int usb_phy_roothub_notify_disconnect(struct usb_phy_roothub *phy_roothub, int port)
+{
+ struct usb_phy_roothub *roothub_entry;
+ struct list_head *head;
+ int err;
+
+ if (!phy_roothub)
+ return 0;
+
+ head = &phy_roothub->list;
+
+ list_for_each_entry(roothub_entry, head, list) {
+ err = phy_notify_disconnect(roothub_entry->phy, port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_notify_disconnect);
+
int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub)
{
struct usb_phy_roothub *roothub_entry;
diff --git a/drivers/usb/core/phy.h b/drivers/usb/core/phy.h
index 20a267cd986b..88b49c0ea6b5 100644
--- a/drivers/usb/core/phy.h
+++ b/drivers/usb/core/phy.h
@@ -12,6 +12,7 @@ struct device;
struct usb_phy_roothub;
struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev);
+struct usb_phy_roothub *usb_phy_roothub_alloc_usb3_phy(struct device *dev);
int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub);
int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub);
@@ -19,6 +20,8 @@ int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub);
int usb_phy_roothub_set_mode(struct usb_phy_roothub *phy_roothub,
enum phy_mode mode);
int usb_phy_roothub_calibrate(struct usb_phy_roothub *phy_roothub);
+int usb_phy_roothub_notify_connect(struct usb_phy_roothub *phy_roothub, int port);
+int usb_phy_roothub_notify_disconnect(struct usb_phy_roothub *phy_roothub, int port);
int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub);
void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub);
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 4d63496f98b6..5b5e613a11e5 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/pm_qos.h>
#include <linux/component.h>
+#include <linux/usb/of.h>
#include "hub.h"
@@ -429,7 +430,7 @@ static const struct dev_pm_ops usb_port_pm_ops = {
#endif
};
-struct device_type usb_port_device_type = {
+const struct device_type usb_port_device_type = {
.name = "usb_port",
.release = usb_port_device_release,
.pm = &usb_port_pm_ops,
@@ -709,6 +710,7 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
return -ENOMEM;
}
+ port_dev->connect_type = usb_of_get_connect_type(hdev, port1);
hub->ports[port1 - 1] = port_dev;
port_dev->portnum = port1;
set_bit(port1, hub->power_bits);
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 5d21718afb05..f98263e21c2a 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -273,9 +273,10 @@ static ssize_t avoid_reset_quirk_store(struct device *dev,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
- int val, rc;
+ bool val;
+ int rc;
- if (sscanf(buf, "%d", &val) != 1 || val < 0 || val > 1)
+ if (kstrtobool(buf, &val) != 0)
return -EINVAL;
rc = usb_lock_device_interruptible(udev);
if (rc < 0)
@@ -322,13 +323,14 @@ static ssize_t persist_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
- int value, rc;
+ bool value;
+ int rc;
/* Hubs are always enabled for USB_PERSIST */
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB)
return -EPERM;
- if (sscanf(buf, "%d", &value) != 1)
+ if (kstrtobool(buf, &value) != 0)
return -EINVAL;
rc = usb_lock_device_interruptible(udev);
@@ -739,14 +741,14 @@ static ssize_t authorized_store(struct device *dev,
{
ssize_t result;
struct usb_device *usb_dev = to_usb_device(dev);
- unsigned val;
- result = sscanf(buf, "%u\n", &val);
- if (result != 1)
+ bool val;
+
+ if (kstrtobool(buf, &val) != 0)
result = -EINVAL;
- else if (val == 0)
- result = usb_deauthorize_device(usb_dev);
- else
+ else if (val)
result = usb_authorize_device(usb_dev);
+ else
+ result = usb_deauthorize_device(usb_dev);
return result < 0 ? result : size;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(authorized, S_IRUGO | S_IWUSR,
@@ -847,16 +849,10 @@ static const struct attribute_group dev_string_attr_grp = {
.is_visible = dev_string_attrs_are_visible,
};
-const struct attribute_group *usb_device_groups[] = {
- &dev_attr_grp,
- &dev_string_attr_grp,
- NULL
-};
-
/* Binary descriptors */
static ssize_t
-read_descriptors(struct file *filp, struct kobject *kobj,
+descriptors_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
@@ -878,7 +874,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
srclen = sizeof(struct usb_device_descriptor);
} else {
src = udev->rawdescriptors[cfgno];
- srclen = __le16_to_cpu(udev->config[cfgno].desc.
+ srclen = le16_to_cpu(udev->config[cfgno].desc.
wTotalLength);
}
if (off < srclen) {
@@ -893,11 +889,69 @@ read_descriptors(struct file *filp, struct kobject *kobj,
}
return count - nleft;
}
+static BIN_ATTR_RO(descriptors, 18 + 65535); /* dev descr + max-size raw descriptor */
+
+static ssize_t
+bos_descriptors_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct usb_device *udev = to_usb_device(dev);
+ struct usb_host_bos *bos = udev->bos;
+ struct usb_bos_descriptor *desc;
+ size_t desclen, n = 0;
+
+ if (bos) {
+ desc = bos->desc;
+ desclen = le16_to_cpu(desc->wTotalLength);
+ if (off < desclen) {
+ n = min(count, desclen - (size_t) off);
+ memcpy(buf, (void *) desc + off, n);
+ }
+ }
+ return n;
+}
+static BIN_ATTR_RO(bos_descriptors, 65535); /* max-size BOS */
-static struct bin_attribute dev_bin_attr_descriptors = {
- .attr = {.name = "descriptors", .mode = 0444},
- .read = read_descriptors,
- .size = 18 + 65535, /* dev descr + max-size raw descriptor */
+/* When modifying this list, be sure to modify dev_bin_attrs_are_visible()
+ * accordingly.
+ */
+static struct bin_attribute *dev_bin_attrs[] = {
+ &bin_attr_descriptors,
+ &bin_attr_bos_descriptors,
+ NULL
+};
+
+static umode_t dev_bin_attrs_are_visible(struct kobject *kobj,
+ struct bin_attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct usb_device *udev = to_usb_device(dev);
+
+ /*
+ * There's no need to check if the descriptors attribute should
+ * be visible because all devices have a device descriptor. The
+ * bos_descriptors attribute should be visible if and only if
+ * the device has a BOS, so check if it exists here.
+ */
+ if (a == &bin_attr_bos_descriptors) {
+ if (udev->bos == NULL)
+ return 0;
+ }
+ return a->attr.mode;
+}
+
+static const struct attribute_group dev_bin_attr_grp = {
+ .bin_attrs = dev_bin_attrs,
+ .is_bin_visible = dev_bin_attrs_are_visible,
+};
+
+const struct attribute_group *usb_device_groups[] = {
+ &dev_attr_grp,
+ &dev_string_attr_grp,
+ &dev_bin_attr_grp,
+ NULL
};
/*
@@ -1015,10 +1069,6 @@ int usb_create_sysfs_dev_files(struct usb_device *udev)
struct device *dev = &udev->dev;
int retval;
- retval = device_create_bin_file(dev, &dev_bin_attr_descriptors);
- if (retval)
- goto error;
-
retval = add_persist_attributes(dev);
if (retval)
goto error;
@@ -1048,7 +1098,6 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev)
remove_power_attributes(dev);
remove_persist_attributes(dev);
- device_remove_bin_file(dev, &dev_bin_attr_descriptors);
}
/* Interface Association Descriptor fields */
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index a34b22537d7c..7f8a912d4fe2 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -142,12 +142,19 @@ int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable)
}
EXPORT_SYMBOL_GPL(usb_acpi_set_power_state);
-static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
- struct acpi_pld_info *pld)
+/*
+ * Private to usb-acpi, all the core needs to know is that
+ * port_dev->location is non-zero when it has been set by the firmware.
+ */
+#define USB_ACPI_LOCATION_VALID (1 << 31)
+
+static void
+usb_acpi_get_connect_type(struct usb_port *port_dev, acpi_handle *handle)
{
enum usb_port_connect_type connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *upc = NULL;
+ struct acpi_pld_info *pld = NULL;
acpi_status status;
/*
@@ -158,6 +165,12 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
* a usb device is directly hard-wired to the port. If no visible and
* no connectable, the port would be not used.
*/
+
+ status = acpi_get_physical_device_location(handle, &pld);
+ if (ACPI_SUCCESS(status) && pld)
+ port_dev->location = USB_ACPI_LOCATION_VALID |
+ pld->group_token << 8 | pld->group_position;
+
status = acpi_evaluate_object(handle, "_UPC", NULL, &buffer);
if (ACPI_FAILURE(status))
goto out;
@@ -166,25 +179,22 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
if (!upc || (upc->type != ACPI_TYPE_PACKAGE) || upc->package.count != 4)
goto out;
+ /* UPC states port is connectable */
if (upc->package.elements[0].integer.value)
- if (pld->user_visible)
+ if (!pld)
+ ; /* keep connect_type as unknown */
+ else if (pld->user_visible)
connect_type = USB_PORT_CONNECT_TYPE_HOT_PLUG;
else
connect_type = USB_PORT_CONNECT_TYPE_HARD_WIRED;
- else if (!pld->user_visible)
+ else
connect_type = USB_PORT_NOT_USED;
out:
+ port_dev->connect_type = connect_type;
kfree(upc);
- return connect_type;
+ ACPI_FREE(pld);
}
-
-/*
- * Private to usb-acpi, all the core needs to know is that
- * port_dev->location is non-zero when it has been set by the firmware.
- */
-#define USB_ACPI_LOCATION_VALID (1 << 31)
-
static struct acpi_device *
usb_acpi_get_companion_for_port(struct usb_port *port_dev)
{
@@ -222,22 +232,12 @@ static struct acpi_device *
usb_acpi_find_companion_for_port(struct usb_port *port_dev)
{
struct acpi_device *adev;
- struct acpi_pld_info *pld;
- acpi_handle *handle;
- acpi_status status;
adev = usb_acpi_get_companion_for_port(port_dev);
if (!adev)
return NULL;
- handle = adev->handle;
- status = acpi_get_physical_device_location(handle, &pld);
- if (ACPI_SUCCESS(status) && pld) {
- port_dev->location = USB_ACPI_LOCATION_VALID
- | pld->group_token << 8 | pld->group_position;
- port_dev->connect_type = usb_acpi_get_connect_type(handle, pld);
- ACPI_FREE(pld);
- }
+ usb_acpi_get_connect_type(port_dev, adev->handle);
return adev;
}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index dc8d9228a5e7..a0c432b14b20 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -592,7 +592,7 @@ static char *usb_devnode(const struct device *dev,
usb_dev->bus->busnum, usb_dev->devnum);
}
-struct device_type usb_device_type = {
+const struct device_type usb_device_type = {
.name = "usb_device",
.release = usb_release_dev,
.uevent = usb_dev_uevent,
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index bfecb50773b6..b8324ea05b20 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -144,10 +144,10 @@ static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
extern const struct class usbmisc_class;
extern const struct bus_type usb_bus_type;
extern struct mutex usb_port_peer_mutex;
-extern struct device_type usb_device_type;
-extern struct device_type usb_if_device_type;
-extern struct device_type usb_ep_device_type;
-extern struct device_type usb_port_device_type;
+extern const struct device_type usb_device_type;
+extern const struct device_type usb_if_device_type;
+extern const struct device_type usb_ep_device_type;
+extern const struct device_type usb_port_device_type;
extern struct usb_device_driver usb_generic_driver;
static inline int is_usb_device(const struct device *dev)
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 5fc27b20df63..31078f3d41b8 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -131,7 +131,7 @@ config USB_DWC3_QCOM
tristate "Qualcomm Platform"
depends on ARCH_QCOM || COMPILE_TEST
depends on EXTCON || !EXTCON
- depends on (OF || ACPI)
+ depends on OF
default USB_DWC3
help
Some Qualcomm SoCs use DesignWare Core IP for USB2/3
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index e120611a5174..c07edfc954f7 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -755,6 +755,7 @@ struct dwc3_ep {
#define DWC3_EP_PENDING_CLEAR_STALL BIT(11)
#define DWC3_EP_TXFIFO_RESIZED BIT(12)
#define DWC3_EP_DELAY_STOP BIT(13)
+#define DWC3_EP_RESOURCE_ALLOCATED BIT(14)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN BIT(31)
@@ -1257,6 +1258,7 @@ struct dwc3 {
#define DWC31_REVISION_170A 0x3137302a
#define DWC31_REVISION_180A 0x3138302a
#define DWC31_REVISION_190A 0x3139302a
+#define DWC31_REVISION_200A 0x3230302a
#define DWC32_REVISION_ANY 0x0
#define DWC32_REVISION_100A 0x3130302a
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index 90a587bc29b7..fad151e78fd6 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -97,9 +97,15 @@
#define USBSS_VBUS_STAT_SESSVALID BIT(2)
#define USBSS_VBUS_STAT_VBUSVALID BIT(0)
-/* Mask for PHY PLL REFCLK */
+/* USB_PHY_CTRL register bits in CTRL_MMR */
+#define PHY_CORE_VOLTAGE_MASK BIT(31)
#define PHY_PLL_REFCLK_MASK GENMASK(3, 0)
+/* USB PHY2 register offsets */
+#define USB_PHY_PLL_REG12 0x130
+#define USB_PHY_PLL_LDO_REF_EN BIT(5)
+#define USB_PHY_PLL_LDO_REF_EN_EN BIT(4)
+
#define DWC3_AM62_AUTOSUSPEND_DELAY 100
struct dwc3_am62 {
@@ -162,6 +168,13 @@ static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
am62->offset = args.args[0];
+ /* Core voltage. The PHY_CORE_VOLTAGE bit is recommended to always be 0 */
+ ret = regmap_update_bits(am62->syscon, am62->offset, PHY_CORE_VOLTAGE_MASK, 0);
+ if (ret) {
+ dev_err(dev, "failed to set phy core voltage\n");
+ return ret;
+ }
+
ret = regmap_update_bits(am62->syscon, am62->offset, PHY_PLL_REFCLK_MASK, am62->rate_code);
if (ret) {
dev_err(dev, "failed to set phy pll reference clock rate\n");
@@ -176,8 +189,9 @@ static int dwc3_ti_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
struct dwc3_am62 *am62;
- int i, ret;
unsigned long rate;
+ void __iomem *phy;
+ int i, ret;
u32 reg;
am62 = devm_kzalloc(dev, sizeof(*am62), GFP_KERNEL);
@@ -219,6 +233,17 @@ static int dwc3_ti_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /* Workaround Errata i2409 */
+ phy = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "can't map PHY IOMEM resource. Won't apply i2409 fix.\n");
+ phy = NULL;
+ } else {
+ reg = readl(phy + USB_PHY_PLL_REG12);
+ reg |= USB_PHY_PLL_LDO_REF_EN | USB_PHY_PLL_LDO_REF_EN_EN;
+ writel(reg, phy + USB_PHY_PLL_REG12);
+ }
+
/* VBUS divider select */
am62->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
reg = dwc3_ti_readl(am62, USBSS_PHY_CONFIG);
@@ -267,21 +292,15 @@ err_pm_disable:
return ret;
}
-static int dwc3_ti_remove_core(struct device *dev, void *c)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- platform_device_unregister(pdev);
- return 0;
-}
-
static void dwc3_ti_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dwc3_am62 *am62 = platform_get_drvdata(pdev);
u32 reg;
- device_for_each_child(dev, NULL, dwc3_ti_remove_core);
+ pm_runtime_get_sync(dev);
+ device_init_wakeup(dev, false);
+ of_platform_depopulate(dev);
/* Clear mode valid bit */
reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
@@ -289,7 +308,6 @@ static void dwc3_ti_remove(struct platform_device *pdev)
dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
pm_runtime_put_sync(dev);
- clk_disable_unprepare(am62->usb2_refclk);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
}
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index d1539fc9eabd..be7be00ecb34 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -52,8 +52,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "rockchip,rk3399-dwc3"))
simple->need_reset = true;
- simple->resets = of_reset_control_array_get(np, false, true,
- true);
+ simple->resets = of_reset_control_array_get_optional_exclusive(np);
if (IS_ERR(simple->resets)) {
ret = PTR_ERR(simple->resets);
dev_err(dev, "failed to get device resets, err=%d\n", ret);
@@ -173,6 +172,7 @@ static const struct of_device_id of_dwc3_simple_match[] = {
{ .compatible = "sprd,sc9860-dwc3" },
{ .compatible = "allwinner,sun50i-h6-dwc3" },
{ .compatible = "hisilicon,hi3670-dwc3" },
+ { .compatible = "hisilicon,hi3798mv200-dwc3" },
{ .compatible = "intel,keembay-dwc3" },
{ /* Sentinel */ }
};
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index dbd6a5b2b289..f6b2fab49d5e 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -4,7 +4,6 @@
* Inspired by dwc3-of-simple.c
*/
-#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk.h>
@@ -53,22 +52,10 @@
#define APPS_USB_AVG_BW 0
#define APPS_USB_PEAK_BW MBps_to_icc(40)
-struct dwc3_acpi_pdata {
- u32 qscratch_base_offset;
- u32 qscratch_base_size;
- u32 dwc3_core_base_size;
- int qusb2_phy_irq_index;
- int dp_hs_phy_irq_index;
- int dm_hs_phy_irq_index;
- int ss_phy_irq_index;
- bool is_urs;
-};
-
struct dwc3_qcom {
struct device *dev;
void __iomem *qscratch_base;
struct platform_device *dwc3;
- struct platform_device *urs_usb;
struct clk **clks;
int num_clocks;
struct reset_control *resets;
@@ -84,8 +71,6 @@ struct dwc3_qcom {
struct notifier_block vbus_nb;
struct notifier_block host_nb;
- const struct dwc3_acpi_pdata *acpi_pdata;
-
enum usb_dr_mode mode;
bool is_suspended;
bool pm_suspended;
@@ -248,9 +233,6 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
struct device *dev = qcom->dev;
int ret;
- if (has_acpi_companion(dev))
- return 0;
-
qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
if (IS_ERR(qcom->icc_path_ddr)) {
return dev_err_probe(dev, PTR_ERR(qcom->icc_path_ddr),
@@ -519,31 +501,13 @@ static void dwc3_qcom_select_utmi_clk(struct dwc3_qcom *qcom)
PIPE_UTMI_CLK_DIS);
}
-static int dwc3_qcom_get_irq(struct platform_device *pdev,
- const char *name, int num)
-{
- struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb : pdev;
- struct device_node *np = pdev->dev.of_node;
- int ret;
-
- if (np)
- ret = platform_get_irq_byname_optional(pdev_irq, name);
- else
- ret = platform_get_irq_optional(pdev_irq, num);
-
- return ret;
-}
-
static int dwc3_qcom_setup_irq(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- const struct dwc3_acpi_pdata *pdata = qcom->acpi_pdata;
int irq;
int ret;
- irq = dwc3_qcom_get_irq(pdev, "qusb2_phy",
- pdata ? pdata->qusb2_phy_irq_index : -1);
+ irq = platform_get_irq_byname_optional(pdev, "qusb2_phy");
if (irq > 0) {
/* Keep wakeup interrupts disabled until suspend */
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
@@ -557,8 +521,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
qcom->qusb2_phy_irq = irq;
}
- irq = dwc3_qcom_get_irq(pdev, "dp_hs_phy_irq",
- pdata ? pdata->dp_hs_phy_irq_index : -1);
+ irq = platform_get_irq_byname_optional(pdev, "dp_hs_phy_irq");
if (irq > 0) {
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
@@ -571,8 +534,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
qcom->dp_hs_phy_irq = irq;
}
- irq = dwc3_qcom_get_irq(pdev, "dm_hs_phy_irq",
- pdata ? pdata->dm_hs_phy_irq_index : -1);
+ irq = platform_get_irq_byname_optional(pdev, "dm_hs_phy_irq");
if (irq > 0) {
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
@@ -585,8 +547,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
qcom->dm_hs_phy_irq = irq;
}
- irq = dwc3_qcom_get_irq(pdev, "ss_phy_irq",
- pdata ? pdata->ss_phy_irq_index : -1);
+ irq = platform_get_irq_byname_optional(pdev, "ss_phy_irq");
if (irq > 0) {
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
@@ -649,88 +610,6 @@ static int dwc3_qcom_clk_init(struct dwc3_qcom *qcom, int count)
return 0;
}
-static const struct property_entry dwc3_qcom_acpi_properties[] = {
- PROPERTY_ENTRY_STRING("dr_mode", "host"),
- {}
-};
-
-static const struct software_node dwc3_qcom_swnode = {
- .properties = dwc3_qcom_acpi_properties,
-};
-
-static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
-{
- struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
- struct resource *res, *child_res = NULL;
- struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb :
- pdev;
- int irq;
- int ret;
-
- qcom->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
- if (!qcom->dwc3)
- return -ENOMEM;
-
- qcom->dwc3->dev.parent = dev;
- qcom->dwc3->dev.type = dev->type;
- qcom->dwc3->dev.dma_mask = dev->dma_mask;
- qcom->dwc3->dev.dma_parms = dev->dma_parms;
- qcom->dwc3->dev.coherent_dma_mask = dev->coherent_dma_mask;
-
- child_res = kcalloc(2, sizeof(*child_res), GFP_KERNEL);
- if (!child_res) {
- platform_device_put(qcom->dwc3);
- return -ENOMEM;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get memory resource\n");
- ret = -ENODEV;
- goto out;
- }
-
- child_res[0].flags = res->flags;
- child_res[0].start = res->start;
- child_res[0].end = child_res[0].start +
- qcom->acpi_pdata->dwc3_core_base_size;
-
- irq = platform_get_irq(pdev_irq, 0);
- if (irq < 0) {
- ret = irq;
- goto out;
- }
- child_res[1].flags = IORESOURCE_IRQ;
- child_res[1].start = child_res[1].end = irq;
-
- ret = platform_device_add_resources(qcom->dwc3, child_res, 2);
- if (ret) {
- dev_err(&pdev->dev, "failed to add resources\n");
- goto out;
- }
-
- ret = device_add_software_node(&qcom->dwc3->dev, &dwc3_qcom_swnode);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add properties\n");
- goto out;
- }
-
- ret = platform_device_add(qcom->dwc3);
- if (ret) {
- dev_err(&pdev->dev, "failed to add device\n");
- device_remove_software_node(&qcom->dwc3->dev);
- goto out;
- }
- kfree(child_res);
- return 0;
-
-out:
- platform_device_put(qcom->dwc3);
- kfree(child_res);
- return ret;
-}
-
static int dwc3_qcom_of_register_core(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
@@ -763,57 +642,12 @@ node_put:
return ret;
}
-static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev)
-{
- struct platform_device *urs_usb = NULL;
- struct fwnode_handle *fwh;
- struct acpi_device *adev;
- char name[8];
- int ret;
- int id;
-
- /* Figure out device id */
- ret = sscanf(fwnode_get_name(dev->fwnode), "URS%d", &id);
- if (!ret)
- return NULL;
-
- /* Find the child using name */
- snprintf(name, sizeof(name), "USB%d", id);
- fwh = fwnode_get_named_child_node(dev->fwnode, name);
- if (!fwh)
- return NULL;
-
- adev = to_acpi_device_node(fwh);
- if (!adev)
- goto err_put_handle;
-
- urs_usb = acpi_create_platform_device(adev, NULL);
- if (IS_ERR_OR_NULL(urs_usb))
- goto err_put_handle;
-
- return urs_usb;
-
-err_put_handle:
- fwnode_handle_put(fwh);
-
- return urs_usb;
-}
-
-static void dwc3_qcom_destroy_urs_usb_platdev(struct platform_device *urs_usb)
-{
- struct fwnode_handle *fwh = urs_usb->dev.fwnode;
-
- platform_device_unregister(urs_usb);
- fwnode_handle_put(fwh);
-}
-
static int dwc3_qcom_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct dwc3_qcom *qcom;
- struct resource *res, *parent_res = NULL;
- struct resource local_res;
+ struct resource *res;
int ret, i;
bool ignore_pipe_clk;
bool wakeup_source;
@@ -825,14 +659,6 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qcom);
qcom->dev = &pdev->dev;
- if (has_acpi_companion(dev)) {
- qcom->acpi_pdata = acpi_device_get_match_data(dev);
- if (!qcom->acpi_pdata) {
- dev_err(&pdev->dev, "no supporting ACPI device data\n");
- return -EINVAL;
- }
- }
-
qcom->resets = devm_reset_control_array_get_optional_exclusive(dev);
if (IS_ERR(qcom->resets)) {
return dev_err_probe(&pdev->dev, PTR_ERR(qcom->resets),
@@ -861,40 +687,16 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (np) {
- parent_res = res;
- } else {
- memcpy(&local_res, res, sizeof(struct resource));
- parent_res = &local_res;
-
- parent_res->start = res->start +
- qcom->acpi_pdata->qscratch_base_offset;
- parent_res->end = parent_res->start +
- qcom->acpi_pdata->qscratch_base_size;
-
- if (qcom->acpi_pdata->is_urs) {
- qcom->urs_usb = dwc3_qcom_create_urs_usb_platdev(dev);
- if (IS_ERR_OR_NULL(qcom->urs_usb)) {
- dev_err(dev, "failed to create URS USB platdev\n");
- if (!qcom->urs_usb)
- ret = -ENODEV;
- else
- ret = PTR_ERR(qcom->urs_usb);
- goto clk_disable;
- }
- }
- }
-
- qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
+ qcom->qscratch_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qcom->qscratch_base)) {
ret = PTR_ERR(qcom->qscratch_base);
- goto free_urs;
+ goto clk_disable;
}
ret = dwc3_qcom_setup_irq(pdev);
if (ret) {
dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
- goto free_urs;
+ goto clk_disable;
}
/*
@@ -906,14 +708,10 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (ignore_pipe_clk)
dwc3_qcom_select_utmi_clk(qcom);
- if (np)
- ret = dwc3_qcom_of_register_core(pdev);
- else
- ret = dwc3_qcom_acpi_register_core(pdev);
-
+ ret = dwc3_qcom_of_register_core(pdev);
if (ret) {
dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
- goto free_urs;
+ goto clk_disable;
}
ret = dwc3_qcom_interconnect_init(qcom);
@@ -945,16 +743,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
interconnect_exit:
dwc3_qcom_interconnect_exit(qcom);
depopulate:
- if (np) {
- of_platform_depopulate(&pdev->dev);
- } else {
- device_remove_software_node(&qcom->dwc3->dev);
- platform_device_del(qcom->dwc3);
- }
+ of_platform_depopulate(&pdev->dev);
platform_device_put(qcom->dwc3);
-free_urs:
- if (qcom->urs_usb)
- dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
clk_disable:
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
@@ -969,21 +759,12 @@ reset_assert:
static void dwc3_qcom_remove(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
int i;
- if (np) {
- of_platform_depopulate(&pdev->dev);
- } else {
- device_remove_software_node(&qcom->dwc3->dev);
- platform_device_del(qcom->dwc3);
- }
+ of_platform_depopulate(&pdev->dev);
platform_device_put(qcom->dwc3);
- if (qcom->urs_usb)
- dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
-
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
clk_put(qcom->clks[i]);
@@ -1053,38 +834,6 @@ static const struct of_device_id dwc3_qcom_of_match[] = {
};
MODULE_DEVICE_TABLE(of, dwc3_qcom_of_match);
-#ifdef CONFIG_ACPI
-static const struct dwc3_acpi_pdata sdm845_acpi_pdata = {
- .qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
- .qscratch_base_size = SDM845_QSCRATCH_SIZE,
- .dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
- .qusb2_phy_irq_index = 1,
- .dp_hs_phy_irq_index = 4,
- .dm_hs_phy_irq_index = 3,
- .ss_phy_irq_index = 2
-};
-
-static const struct dwc3_acpi_pdata sdm845_acpi_urs_pdata = {
- .qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
- .qscratch_base_size = SDM845_QSCRATCH_SIZE,
- .dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
- .qusb2_phy_irq_index = 1,
- .dp_hs_phy_irq_index = 4,
- .dm_hs_phy_irq_index = 3,
- .ss_phy_irq_index = 2,
- .is_urs = true,
-};
-
-static const struct acpi_device_id dwc3_qcom_acpi_match[] = {
- { "QCOM2430", (unsigned long)&sdm845_acpi_pdata },
- { "QCOM0304", (unsigned long)&sdm845_acpi_urs_pdata },
- { "QCOM0497", (unsigned long)&sdm845_acpi_urs_pdata },
- { "QCOM04A6", (unsigned long)&sdm845_acpi_pdata },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, dwc3_qcom_acpi_match);
-#endif
-
static struct platform_driver dwc3_qcom_driver = {
.probe = dwc3_qcom_probe,
.remove_new = dwc3_qcom_remove,
@@ -1092,7 +841,6 @@ static struct platform_driver dwc3_qcom_driver = {
.name = "dwc3-qcom",
.pm = &dwc3_qcom_dev_pm_ops,
.of_match_table = dwc3_qcom_of_match,
- .acpi_match_table = ACPI_PTR(dwc3_qcom_acpi_match),
},
};
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 6ae8a36f21cf..72bb722da2f2 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -646,6 +646,7 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
return -EINVAL;
case USB_STATE_ADDRESS:
+ dwc3_gadget_start_config(dwc, 2);
dwc3_gadget_clear_tx_fifos(dwc);
ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 28f49400f3e8..40c52dbc28d3 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -519,77 +519,56 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
+ int ret;
+
+ if (dep->flags & DWC3_EP_RESOURCE_ALLOCATED)
+ return 0;
memset(&params, 0x00, sizeof(params));
params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
- return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
&params);
+ if (ret)
+ return ret;
+
+ dep->flags |= DWC3_EP_RESOURCE_ALLOCATED;
+ return 0;
}
/**
- * dwc3_gadget_start_config - configure ep resources
- * @dep: endpoint that is being enabled
- *
- * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
- * completion, it will set Transfer Resource for all available endpoints.
- *
- * The assignment of transfer resources cannot perfectly follow the data book
- * due to the fact that the controller driver does not have all knowledge of the
- * configuration in advance. It is given this information piecemeal by the
- * composite gadget framework after every SET_CONFIGURATION and
- * SET_INTERFACE. Trying to follow the databook programming model in this
- * scenario can cause errors. For two reasons:
- *
- * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
- * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
- * incorrect in the scenario of multiple interfaces.
- *
- * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
- * endpoint on alt setting (8.1.6).
- *
- * The following simplified method is used instead:
+ * dwc3_gadget_start_config - reset endpoint resources
+ * @dwc: pointer to the DWC3 context
+ * @resource_index: DEPSTARTCFG.XferRscIdx value (must be 0 or 2)
*
- * All hardware endpoints can be assigned a transfer resource and this setting
- * will stay persistent until either a core reset or hibernation. So whenever we
- * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
- * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
- * guaranteed that there are as many transfer resources as endpoints.
+ * Set resource_index=0 to reset all endpoints' resource allocation. Do this as
+ * part of the power-on/soft-reset initialization.
*
- * This function is called for each endpoint when it is being enabled but is
- * triggered only when called for EP0-out, which always happens first, and which
- * should only happen in one of the above conditions.
+ * Set resource_index=2 to reset only non-control endpoints' resources. Do this
+ * on receiving the SET_CONFIGURATION request or hibernation resume.
*/
-static int dwc3_gadget_start_config(struct dwc3_ep *dep)
+int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index)
{
struct dwc3_gadget_ep_cmd_params params;
- struct dwc3 *dwc;
u32 cmd;
int i;
int ret;
- if (dep->number)
- return 0;
+ if (resource_index != 0 && resource_index != 2)
+ return -EINVAL;
memset(&params, 0x00, sizeof(params));
cmd = DWC3_DEPCMD_DEPSTARTCFG;
- dwc = dep->dwc;
+ cmd |= DWC3_DEPCMD_PARAM(resource_index);
- ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ ret = dwc3_send_gadget_ep_cmd(dwc->eps[0], cmd, &params);
if (ret)
return ret;
- for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
- struct dwc3_ep *dep = dwc->eps[i];
-
- if (!dep)
- continue;
-
- ret = dwc3_gadget_set_xfer_resource(dep);
- if (ret)
- return ret;
- }
+ /* Reset resource allocation flags */
+ for (i = resource_index; i < dwc->num_eps && dwc->eps[i]; i++)
+ dwc->eps[i]->flags &= ~DWC3_EP_RESOURCE_ALLOCATED;
return 0;
}
@@ -884,16 +863,18 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
ret = dwc3_gadget_resize_tx_fifos(dep);
if (ret)
return ret;
-
- ret = dwc3_gadget_start_config(dep);
- if (ret)
- return ret;
}
ret = dwc3_gadget_set_ep_config(dep, action);
if (ret)
return ret;
+ if (!(dep->flags & DWC3_EP_RESOURCE_ALLOCATED)) {
+ ret = dwc3_gadget_set_xfer_resource(dep);
+ if (ret)
+ return ret;
+ }
+
if (!(dep->flags & DWC3_EP_ENABLED)) {
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
@@ -1047,7 +1028,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->stream_capable = false;
dep->type = 0;
- mask = DWC3_EP_TXFIFO_RESIZED;
+ mask = DWC3_EP_TXFIFO_RESIZED | DWC3_EP_RESOURCE_ALLOCATED;
/*
* dwc3_remove_requests() can exit early if DWC3 EP delayed stop is
* set. Do not clear DEP flags, so that the end transfer command will
@@ -2913,6 +2894,12 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+ ret = dwc3_gadget_start_config(dwc, 0);
+ if (ret) {
+ dev_err(dwc->dev, "failed to config endpoints\n");
+ return ret;
+ }
+
dep = dwc->eps[0];
dep->flags = 0;
ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
@@ -3428,7 +3415,7 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
struct dwc3_request *req, const struct dwc3_event_depevt *event,
int status)
{
- struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
+ struct dwc3_trb *trb;
struct scatterlist *sg = req->sg;
struct scatterlist *s;
unsigned int num_queued = req->num_queued_sgs;
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 55a56cf67d73..d73e735e4081 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -119,6 +119,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
void dwc3_ep0_send_delayed_status(struct dwc3 *dwc);
void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt);
+int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index);
/**
* dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 43230915323c..5a5cb6ce9946 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -11,8 +11,52 @@
#include <linux/of.h>
#include <linux/platform_device.h>
+#include "../host/xhci-port.h"
+#include "../host/xhci-ext-caps.h"
+#include "../host/xhci-caps.h"
#include "core.h"
+#define XHCI_HCSPARAMS1 0x4
+#define XHCI_PORTSC_BASE 0x400
+
+/**
+ * dwc3_power_off_all_roothub_ports - Power off all Root hub ports
+ * @dwc: Pointer to our controller context structure
+ */
+static void dwc3_power_off_all_roothub_ports(struct dwc3 *dwc)
+{
+ void __iomem *xhci_regs;
+ u32 op_regs_base;
+ int port_num;
+ u32 offset;
+ u32 reg;
+ int i;
+
+ /* xhci regs are not mapped yet, map them temporarily here */
+ if (dwc->xhci_resources[0].start) {
+ xhci_regs = ioremap(dwc->xhci_resources[0].start, DWC3_XHCI_REGS_END);
+ if (!xhci_regs) {
+ dev_err(dwc->dev, "Failed to ioremap xhci_regs\n");
+ return;
+ }
+
+ op_regs_base = HC_LENGTH(readl(xhci_regs));
+ reg = readl(xhci_regs + XHCI_HCSPARAMS1);
+ port_num = HCS_MAX_PORTS(reg);
+
+ for (i = 1; i <= port_num; i++) {
+ offset = op_regs_base + XHCI_PORTSC_BASE + 0x10 * (i - 1);
+ reg = readl(xhci_regs + offset);
+ reg &= ~PORT_POWER;
+ writel(reg, xhci_regs + offset);
+ }
+
+ iounmap(xhci_regs);
+ } else {
+ dev_err(dwc->dev, "xhci base reg invalid\n");
+ }
+}
+
static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
int irq, char *name)
{
@@ -66,6 +110,12 @@ int dwc3_host_init(struct dwc3 *dwc)
int ret, irq;
int prop_idx = 0;
+ /*
+ * Some platforms need to power off all root hub ports immediately after the DWC3
+ * controller is set to host mode, to avoid a VBUS glitch when the xHCI gets reset later.
+ */
+ dwc3_power_off_all_roothub_ports(dwc);
+
irq = dwc3_host_get_irq(dwc);
if (irq < 0)
return irq;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b3592bcb0f96..566ff0b1282a 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -190,6 +190,7 @@ config USB_F_MASS_STORAGE
tristate
config USB_F_FS
+ select DMA_SHARED_BUFFER
tristate
config USB_F_UAC1
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 6bff6cb93789..bffbc1dc651f 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -15,6 +15,9 @@
/* #define VERBOSE_DEBUG */
#include <linux/blkdev.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-resv.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/fs_parser.h>
@@ -43,6 +46,8 @@
#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by an honest dice roll ;) */
+MODULE_IMPORT_NS(DMA_BUF);
+
/* Reference counter handling */
static void ffs_data_get(struct ffs_data *ffs);
static void ffs_data_put(struct ffs_data *ffs);
@@ -124,6 +129,25 @@ struct ffs_ep {
u8 num;
};
+struct ffs_dmabuf_priv {
+ struct list_head entry;
+ struct kref ref;
+ struct ffs_data *ffs;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+ spinlock_t lock;
+ u64 context;
+ struct usb_request *req; /* P: ffs->eps_lock */
+ struct usb_ep *ep; /* P: ffs->eps_lock */
+};
+
+struct ffs_dma_fence {
+ struct dma_fence base;
+ struct ffs_dmabuf_priv *priv;
+ struct work_struct work;
+};
+
struct ffs_epfile {
/* Protects ep->ep and ep->req. */
struct mutex mutex;
@@ -197,6 +221,11 @@ struct ffs_epfile {
unsigned char isoc; /* P: ffs->eps_lock */
unsigned char _pad;
+
+ /* Protects dmabufs */
+ struct mutex dmabufs_mutex;
+ struct list_head dmabufs; /* P: dmabufs_mutex */
+ atomic_t seqno;
};
struct ffs_buffer {
@@ -934,31 +963,44 @@ static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
return ret;
}
-static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
+static struct ffs_ep *ffs_epfile_wait_ep(struct file *file)
{
struct ffs_epfile *epfile = file->private_data;
- struct usb_request *req;
struct ffs_ep *ep;
- char *data = NULL;
- ssize_t ret, data_len = -EINVAL;
- int halt;
-
- /* Are we still active? */
- if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
- return -ENODEV;
+ int ret;
/* Wait for endpoint to be enabled */
ep = epfile->ep;
if (!ep) {
if (file->f_flags & O_NONBLOCK)
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
ret = wait_event_interruptible(
epfile->ffs->wait, (ep = epfile->ep));
if (ret)
- return -EINTR;
+ return ERR_PTR(-EINTR);
}
+ return ep;
+}
+
+static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
+{
+ struct ffs_epfile *epfile = file->private_data;
+ struct usb_request *req;
+ struct ffs_ep *ep;
+ char *data = NULL;
+ ssize_t ret, data_len = -EINVAL;
+ int halt;
+
+ /* Are we still active? */
+ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
+ return -ENODEV;
+
+ ep = ffs_epfile_wait_ep(file);
+ if (IS_ERR(ep))
+ return PTR_ERR(ep);
+
/* Do we halt? */
halt = (!io_data->read == !epfile->in);
if (halt && epfile->isoc)
@@ -1258,10 +1300,58 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
return res;
}
+static void ffs_dmabuf_release(struct kref *ref)
+{
+ struct ffs_dmabuf_priv *priv = container_of(ref, struct ffs_dmabuf_priv, ref);
+ struct dma_buf_attachment *attach = priv->attach;
+ struct dma_buf *dmabuf = attach->dmabuf;
+
+ pr_vdebug("FFS DMABUF release\n");
+ dma_resv_lock(dmabuf->resv, NULL);
+ dma_buf_unmap_attachment(attach, priv->sgt, priv->dir);
+ dma_resv_unlock(dmabuf->resv);
+
+ dma_buf_detach(attach->dmabuf, attach);
+ dma_buf_put(dmabuf);
+ kfree(priv);
+}
+
+static void ffs_dmabuf_get(struct dma_buf_attachment *attach)
+{
+ struct ffs_dmabuf_priv *priv = attach->importer_priv;
+
+ kref_get(&priv->ref);
+}
+
+static void ffs_dmabuf_put(struct dma_buf_attachment *attach)
+{
+ struct ffs_dmabuf_priv *priv = attach->importer_priv;
+
+ kref_put(&priv->ref, ffs_dmabuf_release);
+}
+
static int
ffs_epfile_release(struct inode *inode, struct file *file)
{
struct ffs_epfile *epfile = inode->i_private;
+ struct ffs_dmabuf_priv *priv, *tmp;
+ struct ffs_data *ffs = epfile->ffs;
+
+ mutex_lock(&epfile->dmabufs_mutex);
+
+ /* Close all attached DMABUFs */
+ list_for_each_entry_safe(priv, tmp, &epfile->dmabufs, entry) {
+ /* Cancel any pending transfer */
+ spin_lock_irq(&ffs->eps_lock);
+ if (priv->ep && priv->req)
+ usb_ep_dequeue(priv->ep, priv->req);
+ spin_unlock_irq(&ffs->eps_lock);
+
+ list_del(&priv->entry);
+ ffs_dmabuf_put(priv->attach);
+ }
+
+ mutex_unlock(&epfile->dmabufs_mutex);
__ffs_epfile_read_buffer_free(epfile);
ffs_data_closed(epfile->ffs);
@@ -1269,6 +1359,356 @@ ffs_epfile_release(struct inode *inode, struct file *file)
return 0;
}
+static void ffs_dmabuf_cleanup(struct work_struct *work)
+{
+ struct ffs_dma_fence *dma_fence =
+ container_of(work, struct ffs_dma_fence, work);
+ struct ffs_dmabuf_priv *priv = dma_fence->priv;
+ struct dma_buf_attachment *attach = priv->attach;
+ struct dma_fence *fence = &dma_fence->base;
+
+ ffs_dmabuf_put(attach);
+ dma_fence_put(fence);
+}
+
+static void ffs_dmabuf_signal_done(struct ffs_dma_fence *dma_fence, int ret)
+{
+ struct ffs_dmabuf_priv *priv = dma_fence->priv;
+ struct dma_fence *fence = &dma_fence->base;
+ bool cookie = dma_fence_begin_signalling();
+
+ dma_fence_get(fence);
+ fence->error = ret;
+ dma_fence_signal(fence);
+ dma_fence_end_signalling(cookie);
+
+ /*
+ * The fence will be unref'd in ffs_dmabuf_cleanup.
+ * It can't be done here, as the unref functions might try to lock
+ * the resv object, which would deadlock.
+ */
+ INIT_WORK(&dma_fence->work, ffs_dmabuf_cleanup);
+ queue_work(priv->ffs->io_completion_wq, &dma_fence->work);
+}
+
+static void ffs_epfile_dmabuf_io_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ pr_vdebug("FFS: DMABUF transfer complete, status=%d\n", req->status);
+ ffs_dmabuf_signal_done(req->context, req->status);
+ usb_ep_free_request(ep, req);
+}
+
+static const char *ffs_dmabuf_get_driver_name(struct dma_fence *fence)
+{
+ return "functionfs";
+}
+
+static const char *ffs_dmabuf_get_timeline_name(struct dma_fence *fence)
+{
+ return "";
+}
+
+static void ffs_dmabuf_fence_release(struct dma_fence *fence)
+{
+ struct ffs_dma_fence *dma_fence =
+ container_of(fence, struct ffs_dma_fence, base);
+
+ kfree(dma_fence);
+}
+
+static const struct dma_fence_ops ffs_dmabuf_fence_ops = {
+ .get_driver_name = ffs_dmabuf_get_driver_name,
+ .get_timeline_name = ffs_dmabuf_get_timeline_name,
+ .release = ffs_dmabuf_fence_release,
+};
+
+static int ffs_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock)
+{
+ if (!nonblock)
+ return dma_resv_lock_interruptible(dmabuf->resv, NULL);
+
+ if (!dma_resv_trylock(dmabuf->resv))
+ return -EBUSY;
+
+ return 0;
+}
+
+static struct dma_buf_attachment *
+ffs_dmabuf_find_attachment(struct ffs_epfile *epfile, struct dma_buf *dmabuf)
+{
+ struct device *dev = epfile->ffs->gadget->dev.parent;
+ struct dma_buf_attachment *attach = NULL;
+ struct ffs_dmabuf_priv *priv;
+
+ mutex_lock(&epfile->dmabufs_mutex);
+
+ list_for_each_entry(priv, &epfile->dmabufs, entry) {
+ if (priv->attach->dev == dev
+ && priv->attach->dmabuf == dmabuf) {
+ attach = priv->attach;
+ break;
+ }
+ }
+
+ if (attach)
+ ffs_dmabuf_get(attach);
+
+ mutex_unlock(&epfile->dmabufs_mutex);
+
+ return attach ?: ERR_PTR(-EPERM);
+}
+
+static int ffs_dmabuf_attach(struct file *file, int fd)
+{
+ bool nonblock = file->f_flags & O_NONBLOCK;
+ struct ffs_epfile *epfile = file->private_data;
+ struct usb_gadget *gadget = epfile->ffs->gadget;
+ struct dma_buf_attachment *attach;
+ struct ffs_dmabuf_priv *priv;
+ enum dma_data_direction dir;
+ struct sg_table *sg_table;
+ struct dma_buf *dmabuf;
+ int err;
+
+ if (!gadget || !gadget->sg_supported)
+ return -EPERM;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ attach = dma_buf_attach(dmabuf, gadget->dev.parent);
+ if (IS_ERR(attach)) {
+ err = PTR_ERR(attach);
+ goto err_dmabuf_put;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto err_dmabuf_detach;
+ }
+
+ dir = epfile->in ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ err = ffs_dma_resv_lock(dmabuf, nonblock);
+ if (err)
+ goto err_free_priv;
+
+ sg_table = dma_buf_map_attachment(attach, dir);
+ dma_resv_unlock(dmabuf->resv);
+
+ if (IS_ERR(sg_table)) {
+ err = PTR_ERR(sg_table);
+ goto err_free_priv;
+ }
+
+ attach->importer_priv = priv;
+
+ priv->sgt = sg_table;
+ priv->dir = dir;
+ priv->ffs = epfile->ffs;
+ priv->attach = attach;
+ spin_lock_init(&priv->lock);
+ kref_init(&priv->ref);
+ priv->context = dma_fence_context_alloc(1);
+
+ mutex_lock(&epfile->dmabufs_mutex);
+ list_add(&priv->entry, &epfile->dmabufs);
+ mutex_unlock(&epfile->dmabufs_mutex);
+
+ return 0;
+
+err_free_priv:
+ kfree(priv);
+err_dmabuf_detach:
+ dma_buf_detach(dmabuf, attach);
+err_dmabuf_put:
+ dma_buf_put(dmabuf);
+
+ return err;
+}
+
+static int ffs_dmabuf_detach(struct file *file, int fd)
+{
+ struct ffs_epfile *epfile = file->private_data;
+ struct ffs_data *ffs = epfile->ffs;
+ struct device *dev = ffs->gadget->dev.parent;
+ struct ffs_dmabuf_priv *priv, *tmp;
+ struct dma_buf *dmabuf;
+ int ret = -EPERM;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ mutex_lock(&epfile->dmabufs_mutex);
+
+ list_for_each_entry_safe(priv, tmp, &epfile->dmabufs, entry) {
+ if (priv->attach->dev == dev
+ && priv->attach->dmabuf == dmabuf) {
+ /* Cancel any pending transfer */
+ spin_lock_irq(&ffs->eps_lock);
+ if (priv->ep && priv->req)
+ usb_ep_dequeue(priv->ep, priv->req);
+ spin_unlock_irq(&ffs->eps_lock);
+
+ list_del(&priv->entry);
+
+ /* Unref the reference from ffs_dmabuf_attach() */
+ ffs_dmabuf_put(priv->attach);
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&epfile->dmabufs_mutex);
+ dma_buf_put(dmabuf);
+
+ return ret;
+}
+
+static int ffs_dmabuf_transfer(struct file *file,
+ const struct usb_ffs_dmabuf_transfer_req *req)
+{
+ bool nonblock = file->f_flags & O_NONBLOCK;
+ struct ffs_epfile *epfile = file->private_data;
+ struct dma_buf_attachment *attach;
+ struct ffs_dmabuf_priv *priv;
+ struct ffs_dma_fence *fence;
+ struct usb_request *usb_req;
+ struct dma_buf *dmabuf;
+ struct ffs_ep *ep;
+ bool cookie;
+ u32 seqno;
+ int ret;
+
+ if (req->flags & ~USB_FFS_DMABUF_TRANSFER_MASK)
+ return -EINVAL;
+
+ dmabuf = dma_buf_get(req->fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ if (req->length > dmabuf->size || req->length == 0) {
+ ret = -EINVAL;
+ goto err_dmabuf_put;
+ }
+
+ attach = ffs_dmabuf_find_attachment(epfile, dmabuf);
+ if (IS_ERR(attach)) {
+ ret = PTR_ERR(attach);
+ goto err_dmabuf_put;
+ }
+
+ priv = attach->importer_priv;
+
+ ep = ffs_epfile_wait_ep(file);
+ if (IS_ERR(ep)) {
+ ret = PTR_ERR(ep);
+ goto err_attachment_put;
+ }
+
+ ret = ffs_dma_resv_lock(dmabuf, nonblock);
+ if (ret)
+ goto err_attachment_put;
+
+ /* Make sure we don't have writers */
+ if (!dma_resv_test_signaled(dmabuf->resv, DMA_RESV_USAGE_WRITE)) {
+ pr_vdebug("FFS WRITE fence is not signaled\n");
+ ret = -EBUSY;
+ goto err_resv_unlock;
+ }
+
+ /* If we're writing to the DMABUF, make sure we don't have readers */
+ if (epfile->in &&
+ !dma_resv_test_signaled(dmabuf->resv, DMA_RESV_USAGE_READ)) {
+ pr_vdebug("FFS READ fence is not signaled\n");
+ ret = -EBUSY;
+ goto err_resv_unlock;
+ }
+
+ ret = dma_resv_reserve_fences(dmabuf->resv, 1);
+ if (ret)
+ goto err_resv_unlock;
+
+ fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto err_resv_unlock;
+ }
+
+ fence->priv = priv;
+
+ spin_lock_irq(&epfile->ffs->eps_lock);
+
+ /* In the meantime, endpoint got disabled or changed. */
+ if (epfile->ep != ep) {
+ ret = -ESHUTDOWN;
+ goto err_fence_put;
+ }
+
+ usb_req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
+ if (!usb_req) {
+ ret = -ENOMEM;
+ goto err_fence_put;
+ }
+
+ /*
+ * usb_ep_queue() guarantees that all transfers are processed in the
+ * order they are enqueued, so we can use a simple incrementing
+ * sequence number for the dma_fence.
+ */
+ seqno = atomic_add_return(1, &epfile->seqno);
+
+ dma_fence_init(&fence->base, &ffs_dmabuf_fence_ops,
+ &priv->lock, priv->context, seqno);
+
+ dma_resv_add_fence(dmabuf->resv, &fence->base,
+ dma_resv_usage_rw(epfile->in));
+ dma_resv_unlock(dmabuf->resv);
+
+ /* Now that the dma_fence is in place, queue the transfer. */
+
+ usb_req->length = req->length;
+ usb_req->buf = NULL;
+ usb_req->sg = priv->sgt->sgl;
+ usb_req->num_sgs = sg_nents_for_len(priv->sgt->sgl, req->length);
+ usb_req->sg_was_mapped = true;
+ usb_req->context = fence;
+ usb_req->complete = ffs_epfile_dmabuf_io_complete;
+
+ cookie = dma_fence_begin_signalling();
+ ret = usb_ep_queue(ep->ep, usb_req, GFP_ATOMIC);
+ dma_fence_end_signalling(cookie);
+ if (!ret) {
+ priv->req = usb_req;
+ priv->ep = ep->ep;
+ } else {
+ pr_warn("FFS: Failed to queue DMABUF: %d\n", ret);
+ ffs_dmabuf_signal_done(fence, ret);
+ usb_ep_free_request(ep->ep, usb_req);
+ }
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ dma_buf_put(dmabuf);
+
+ return ret;
+
+err_fence_put:
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ dma_fence_put(&fence->base);
+err_resv_unlock:
+ dma_resv_unlock(dmabuf->resv);
+err_attachment_put:
+ ffs_dmabuf_put(attach);
+err_dmabuf_put:
+ dma_buf_put(dmabuf);
+
+ return ret;
+}
+
static long ffs_epfile_ioctl(struct file *file, unsigned code,
unsigned long value)
{
@@ -1279,18 +1719,49 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
- /* Wait for endpoint to be enabled */
- ep = epfile->ep;
- if (!ep) {
- if (file->f_flags & O_NONBLOCK)
- return -EAGAIN;
+ switch (code) {
+ case FUNCTIONFS_DMABUF_ATTACH:
+ {
+ int fd;
- ret = wait_event_interruptible(
- epfile->ffs->wait, (ep = epfile->ep));
- if (ret)
- return -EINTR;
+ if (copy_from_user(&fd, (void __user *)value, sizeof(fd))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ return ffs_dmabuf_attach(file, fd);
+ }
+ case FUNCTIONFS_DMABUF_DETACH:
+ {
+ int fd;
+
+ if (copy_from_user(&fd, (void __user *)value, sizeof(fd))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ return ffs_dmabuf_detach(file, fd);
+ }
+ case FUNCTIONFS_DMABUF_TRANSFER:
+ {
+ struct usb_ffs_dmabuf_transfer_req req;
+
+ if (copy_from_user(&req, (void __user *)value, sizeof(req))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ return ffs_dmabuf_transfer(file, &req);
+ }
+ default:
+ break;
}
+ /* Wait for endpoint to be enabled */
+ ep = ffs_epfile_wait_ep(file);
+ if (IS_ERR(ep))
+ return PTR_ERR(ep);
+
spin_lock_irq(&epfile->ffs->eps_lock);
/* In the meantime, endpoint got disabled or changed. */
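
Taken together, the three new ioctls give userspace a zero-copy path into an endpoint file: attach a DMABUF once, queue transfers against it, and let completion be signalled through the dma_fence added to the buffer's reservation object. A userspace sketch, assuming the ioctl names and the struct usb_ffs_dmabuf_transfer_req layout exported by the updated uapi <linux/usb/functionfs.h>, with error handling trimmed:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/usb/functionfs.h>

	static int ffs_dmabuf_io(int ep_fd, int dmabuf_fd, uint64_t len)
	{
		struct usb_ffs_dmabuf_transfer_req req;

		/* Make the gadget an importer of the buffer (once per buffer) */
		if (ioctl(ep_fd, FUNCTIONFS_DMABUF_ATTACH, &dmabuf_fd) < 0)
			return -1;

		memset(&req, 0, sizeof(req));
		req.fd = dmabuf_fd;
		req.length = len;	/* must be non-zero and <= the DMABUF size */

		/* Queue one transfer; progress is tracked via the dma_fence */
		if (ioctl(ep_fd, FUNCTIONFS_DMABUF_TRANSFER, &req) < 0) {
			ioctl(ep_fd, FUNCTIONFS_DMABUF_DETACH, &dmabuf_fd);
			return -1;
		}
		return 0;
	}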
@@ -1863,6 +2334,8 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
for (i = 1; i <= count; ++i, ++epfile) {
epfile->ffs = ffs;
mutex_init(&epfile->mutex);
+ mutex_init(&epfile->dmabufs_mutex);
+ INIT_LIST_HEAD(&epfile->dmabufs);
if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
else
@@ -3445,6 +3918,25 @@ static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
func_inst.group);
}
+static ssize_t f_fs_opts_ready_show(struct config_item *item, char *page)
+{
+ struct f_fs_opts *opts = to_ffs_opts(item);
+ int ready;
+
+ ffs_dev_lock();
+ ready = opts->dev->desc_ready;
+ ffs_dev_unlock();
+
+ return sprintf(page, "%d\n", ready);
+}
+
+CONFIGFS_ATTR_RO(f_fs_opts_, ready);
+
+static struct configfs_attribute *ffs_attrs[] = {
+ &f_fs_opts_attr_ready,
+ NULL,
+};
+
static void ffs_attr_release(struct config_item *item)
{
struct f_fs_opts *opts = to_ffs_opts(item);
@@ -3458,6 +3950,7 @@ static struct configfs_item_operations ffs_item_ops = {
static const struct config_item_type ffs_func_type = {
.ct_item_ops = &ffs_item_ops,
+ .ct_attrs = ffs_attrs,
.ct_owner = THIS_MODULE,
};
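
The new read-only attribute reports desc_ready as a plain decimal, so userspace can poll whether the FunctionFS daemon has already supplied its descriptors before binding the gadget. A userspace sketch, where the configfs function directory path is an assumption based on the usual gadget layout:

	#include <stdio.h>

	/* Returns 1 once descriptors are ready, 0 if not yet, -1 on error.
	 * 'func_dir' is e.g. /sys/kernel/config/usb_gadget/g1/functions/ffs.<inst>
	 * (assumed path, not taken from the patch).
	 */
	static int ffs_instance_ready(const char *func_dir)
	{
		char path[256];
		int ready;
		FILE *f;

		snprintf(path, sizeof(path), "%s/ready", func_dir);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (fscanf(f, "%d", &ready) != 1)
			ready = -1;
		fclose(f);
		return ready;
	}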
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 3c5a6f6ac341..444212c0b5a9 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -718,7 +718,7 @@ static const struct net_device_ops eth_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static struct device_type gadget_type = {
+static const struct device_type gadget_type = {
.name = "gadget",
};
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index dd3241fc6939..d41f5f31dadd 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -35,6 +35,9 @@ uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
data[1] = UVC_STREAM_EOH | video->fid;
+ if (video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE)
+ data[1] |= UVC_STREAM_ERR;
+
if (video->queue.buf_used == 0 && ts.tv_sec) {
/* dwClockFrequency is 48 MHz */
u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;
@@ -370,6 +373,7 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
struct uvc_video *video = ureq->video;
struct uvc_video_queue *queue = &video->queue;
struct uvc_buffer *last_buf;
+ struct usb_request *to_queue = req;
unsigned long flags;
bool is_bulk = video->max_payload_size;
int ret = 0;
@@ -397,7 +401,8 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
case -EXDEV:
uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n");
- queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
+ if (req->length != 0)
+ queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
break;
case -ESHUTDOWN: /* disconnect from host. */
@@ -425,59 +430,59 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
* we're still streaming before queueing the usb_request
* back to req_free
*/
- if (video->is_enabled) {
+ if (!video->is_enabled) {
+ uvc_video_free_request(ureq, ep);
+ spin_unlock_irqrestore(&video->req_lock, flags);
+ uvcg_queue_cancel(queue, 0);
+
+ return;
+ }
+
+ /*
+ * Here we check whether any request is available in the ready
+ * list. If it is, queue it to the ep and add the current
+ * usb_request to the req_free list - for video_pump to fill in.
+ * Otherwise, just use the current usb_request to queue a 0
+ * length request to the ep. Since we always add to the req_free
+ * list if we dequeue from the ready list, there will never
+ * be a situation where the req_free list is completely out of
+ * requests and cannot recover.
+ */
+ to_queue->length = 0;
+ if (!list_empty(&video->req_ready)) {
+ to_queue = list_first_entry(&video->req_ready,
+ struct usb_request, list);
+ list_del(&to_queue->list);
+ list_add_tail(&req->list, &video->req_free);
/*
- * Here we check whether any request is available in the ready
- * list. If it is, queue it to the ep and add the current
- * usb_request to the req_free list - for video_pump to fill in.
- * Otherwise, just use the current usb_request to queue a 0
- * length request to the ep. Since we always add to the req_free
- * list if we dequeue from the ready list, there will never
- * be a situation where the req_free list is completely out of
- * requests and cannot recover.
+ * Queue work to the wq as well since it is possible that a
+ * buffer may not have been completely encoded with the set of
+ * in-flight usb requests for which the complete callbacks are
+ * firing.
+ * In that case, if we do not queue work to the worker thread,
+ * the buffer will never be marked as complete - and therefore
+ * not be returned to userspace. As a result,
+ * dequeue -> queue -> dequeue flow of uvc buffers will not
+ * happen.
*/
- struct usb_request *to_queue = req;
-
- to_queue->length = 0;
- if (!list_empty(&video->req_ready)) {
- to_queue = list_first_entry(&video->req_ready,
- struct usb_request, list);
- list_del(&to_queue->list);
- list_add_tail(&req->list, &video->req_free);
- /*
- * Queue work to the wq as well since it is possible that a
- * buffer may not have been completely encoded with the set of
- * in-flight usb requests for whih the complete callbacks are
- * firing.
- * In that case, if we do not queue work to the worker thread,
- * the buffer will never be marked as complete - and therefore
- * not be returned to userpsace. As a result,
- * dequeue -> queue -> dequeue flow of uvc buffers will not
- * happen.
- */
- queue_work(video->async_wq, &video->pump);
- }
+ queue_work(video->async_wq, &video->pump);
+ }
+ /*
+ * Queue to the endpoint. The actual queueing to ep will
+ * only happen on one thread - the async_wq for bulk endpoints
+ * and this thread for isoc endpoints.
+ */
+ ret = uvcg_video_usb_req_queue(video, to_queue, !is_bulk);
+ if (ret < 0) {
/*
- * Queue to the endpoint. The actual queueing to ep will
- * only happen on one thread - the async_wq for bulk endpoints
- * and this thread for isoc endpoints.
+ * Endpoint error, but the stream is still enabled.
+ * Put request back in req_free for it to be cleaned
+ * up later.
*/
- ret = uvcg_video_usb_req_queue(video, to_queue, !is_bulk);
- if (ret < 0) {
- /*
- * Endpoint error, but the stream is still enabled.
- * Put request back in req_free for it to be cleaned
- * up later.
- */
- list_add_tail(&to_queue->list, &video->req_free);
- }
- } else {
- uvc_video_free_request(ureq, ep);
- ret = 0;
+ list_add_tail(&to_queue->list, &video->req_free);
}
+
spin_unlock_irqrestore(&video->req_lock, flags);
- if (ret < 0)
- uvcg_queue_cancel(queue, 0);
}
static int
@@ -594,10 +599,7 @@ static void uvcg_video_pump(struct work_struct *work)
*/
spin_lock_irqsave(&queue->irqlock, flags);
buf = uvcg_queue_head(queue);
-
- if (buf != NULL) {
- video->encode(req, video, buf);
- } else {
+ if (!buf) {
/*
* Either the queue has been disconnected or no video buffer
* available for bulk transfer. Either way, stop processing
@@ -607,6 +609,8 @@ static void uvcg_video_pump(struct work_struct *work)
break;
}
+ video->encode(req, video, buf);
+
spin_unlock_irqrestore(&queue->irqlock, flags);
spin_lock_irqsave(&video->req_lock, flags);
@@ -623,14 +627,7 @@ static void uvcg_video_pump(struct work_struct *work)
uvcg_queue_cancel(queue, 0);
break;
}
-
- /* The request is owned by the endpoint / ready list. */
- req = NULL;
}
-
- if (!req)
- return;
-
spin_lock_irqsave(&video->req_lock, flags);
if (video->is_enabled)
list_add_tail(&req->list, &video->req_free);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index d59f94464b87..9d4150124fdb 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -903,6 +903,11 @@ int usb_gadget_map_request_by_dev(struct device *dev,
if (req->length == 0)
return 0;
+ if (req->sg_was_mapped) {
+ req->num_mapped_sgs = req->num_sgs;
+ return 0;
+ }
+
if (req->num_sgs) {
int mapped;
@@ -948,7 +953,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_map_request);
void usb_gadget_unmap_request_by_dev(struct device *dev,
struct usb_request *req, int is_in)
{
- if (req->length == 0)
+ if (req->length == 0 || req->sg_was_mapped)
return;
if (req->num_mapped_sgs) {
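
With the new sg_was_mapped flag, a function driver that has already DMA-mapped its scatterlist (as f_fs does above through dma_buf_map_attachment()) can hand the list to the UDC core without it being mapped or unmapped a second time. A minimal sketch under those assumptions; the helper name and the caller-supplied completion callback are illustrative, not part of the patch:

	static int queue_premapped_sgt(struct usb_ep *ep, struct sg_table *sgt,
				       unsigned int len,
				       void (*complete)(struct usb_ep *,
							struct usb_request *))
	{
		struct usb_request *req;
		int ret;

		req = usb_ep_alloc_request(ep, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->buf = NULL;		/* data lives in the scatterlist */
		req->length = len;
		req->sg = sgt->sgl;
		req->num_sgs = sg_nents_for_len(sgt->sgl, len);
		req->sg_was_mapped = true;	/* core skips map/unmap */
		req->complete = complete;

		ret = usb_ep_queue(ep, req, GFP_KERNEL);
		if (ret)
			usb_ep_free_request(ep, req);
		return ret;
	}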
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index e8042c158f6d..e82d03224f94 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -13,7 +13,7 @@
* code from Dave Liu and Shlomi Gridish.
*/
-#undef VERBOSE
+#define pr_fmt(x) "udc: " x
#include <linux/module.h>
#include <linux/kernel.h>
@@ -183,9 +183,9 @@ __acquires(ep->udc->lock)
usb_gadget_unmap_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
if (status && (status != -ESHUTDOWN))
- VDBG("complete %s req %p stat %d len %u/%u",
- ep->ep.name, &req->req, status,
- req->req.actual, req->req.length);
+ dev_vdbg(&udc->gadget.dev, "complete %s req %p stat %d len %u/%u\n",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length);
ep->stopped = 1;
@@ -285,7 +285,7 @@ static int dr_controller_setup(struct fsl_udc *udc)
timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
if (time_after(jiffies, timeout)) {
- ERR("udc reset timeout!\n");
+ dev_err(&udc->gadget.dev, "udc reset timeout!\n");
return -ETIMEDOUT;
}
cpu_relax();
@@ -308,9 +308,10 @@ static int dr_controller_setup(struct fsl_udc *udc)
tmp &= USB_EP_LIST_ADDRESS_MASK;
fsl_writel(tmp, &dr_regs->endpointlistaddr);
- VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x",
- udc->ep_qh, (int)tmp,
- fsl_readl(&dr_regs->endpointlistaddr));
+ dev_vdbg(&udc->gadget.dev,
+ "vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x\n",
+ udc->ep_qh, (int)tmp,
+ fsl_readl(&dr_regs->endpointlistaddr));
max_no_of_ep = (0x0000001F & fsl_readl(&dr_regs->dccparams));
for (ep_num = 1; ep_num < max_no_of_ep; ep_num++) {
@@ -498,7 +499,7 @@ static void struct_ep_qh_setup(struct fsl_udc *udc, unsigned char ep_num,
tmp = max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS;
break;
default:
- VDBG("error ep type is %d", ep_type);
+ dev_vdbg(&udc->gadget.dev, "error ep type is %d\n", ep_type);
return;
}
if (zlt)
@@ -611,10 +612,10 @@ static int fsl_ep_enable(struct usb_ep *_ep,
spin_unlock_irqrestore(&udc->lock, flags);
retval = 0;
- VDBG("enabled %s (ep%d%s) maxpacket %d",ep->ep.name,
- ep->ep.desc->bEndpointAddress & 0x0f,
- (desc->bEndpointAddress & USB_DIR_IN)
- ? "in" : "out", max);
+ dev_vdbg(&udc->gadget.dev, "enabled %s (ep%d%s) maxpacket %d\n",
+ ep->ep.name, ep->ep.desc->bEndpointAddress & 0x0f,
+ (desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
+ max);
en_done:
return retval;
}
@@ -633,7 +634,10 @@ static int fsl_ep_disable(struct usb_ep *_ep)
ep = container_of(_ep, struct fsl_ep, ep);
if (!_ep || !ep->ep.desc) {
- VDBG("%s not enabled", _ep ? ep->ep.name : NULL);
+ /*
+ * dev_vdbg(&udc->gadget.dev, "%s not enabled\n",
+ * _ep ? ep->ep.name : NULL);
+ */
return -EINVAL;
}
@@ -659,7 +663,7 @@ static int fsl_ep_disable(struct usb_ep *_ep)
ep->stopped = 1;
spin_unlock_irqrestore(&udc->lock, flags);
- VDBG("disabled %s OK", _ep->name);
+ dev_vdbg(&udc->gadget.dev, "disabled %s OK\n", _ep->name);
return 0;
}
@@ -719,8 +723,8 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
{
u32 temp, bitmask, tmp_stat;
- /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
- VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
+ /* dev_vdbg(&udc->gadget.dev, "QH addr Register 0x%8x\n", dr_regs->endpointlistaddr);
+ dev_vdbg(&udc->gadget.dev, "ep_qh[%d] addr is 0x%8x\n", i, (u32)&(ep->udc->ep_qh[i])); */
bitmask = ep_is_in(ep)
? (1 << (ep_index(ep) + 16))
@@ -808,7 +812,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
*is_last = 0;
if ((*is_last) == 0)
- VDBG("multi-dtd request!");
+ dev_vdbg(&udc_controller->gadget.dev, "multi-dtd request!\n");
/* Fill in the transfer size; set active bit */
swap_temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
@@ -820,7 +824,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
mb();
- VDBG("length = %d address= 0x%x", *length, (int)*dma);
+ dev_vdbg(&udc_controller->gadget.dev, "length = %d address= 0x%x\n", *length, (int)*dma);
return dtd;
}
@@ -871,11 +875,11 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
/* catch various bogus parameters */
if (!_req || !req->req.complete || !req->req.buf
|| !list_empty(&req->queue)) {
- VDBG("%s, bad params", __func__);
+ dev_vdbg(&udc->gadget.dev, "%s, bad params\n", __func__);
return -EINVAL;
}
if (unlikely(!_ep || !ep->ep.desc)) {
- VDBG("%s, bad ep", __func__);
+ dev_vdbg(&udc->gadget.dev, "%s, bad ep\n", __func__);
return -EINVAL;
}
if (usb_endpoint_xfer_isoc(ep->ep.desc)) {
@@ -1036,8 +1040,8 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
udc->ep0_dir = 0;
}
out:
- VDBG(" %s %s halt stat %d", ep->ep.name,
- value ? "set" : "clear", status);
+ dev_vdbg(&udc->gadget.dev, "%s %s halt stat %d\n", ep->ep.name,
+ value ? "set" : "clear", status);
return status;
}
@@ -1105,7 +1109,8 @@ static void fsl_ep_fifo_flush(struct usb_ep *_ep)
/* Wait until flush complete */
while (fsl_readl(&dr_regs->endptflush)) {
if (time_after(jiffies, timeout)) {
- ERR("ep flush timeout\n");
+ dev_err(&udc_controller->gadget.dev,
+ "ep flush timeout\n");
return;
}
cpu_relax();
@@ -1177,7 +1182,7 @@ static int fsl_vbus_session(struct usb_gadget *gadget, int is_active)
udc = container_of(gadget, struct fsl_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
- VDBG("VBUS %s", is_active ? "on" : "off");
+ dev_vdbg(&gadget->dev, "VBUS %s\n", is_active ? "on" : "off");
udc->vbus_active = (is_active != 0);
if (can_pullup(udc))
fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
@@ -1543,7 +1548,7 @@ static void ep0_req_complete(struct fsl_udc *udc, struct fsl_ep *ep0,
udc->ep0_state = WAIT_FOR_SETUP;
break;
case WAIT_FOR_SETUP:
- ERR("Unexpected ep0 packets\n");
+ dev_err(&udc->gadget.dev, "Unexpected ep0 packets\n");
break;
default:
ep0stall(udc);
@@ -1612,7 +1617,7 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
errors = hc32_to_cpu(curr_td->size_ioc_sts);
if (errors & DTD_ERROR_MASK) {
if (errors & DTD_STATUS_HALTED) {
- ERR("dTD error %08x QH=%d\n", errors, pipe);
+ dev_err(&udc->gadget.dev, "dTD error %08x QH=%d\n", errors, pipe);
/* Clear the errors and Halt condition */
tmp = hc32_to_cpu(curr_qh->size_ioc_int_sts);
tmp &= ~errors;
@@ -1623,32 +1628,35 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
break;
}
if (errors & DTD_STATUS_DATA_BUFF_ERR) {
- VDBG("Transfer overflow");
+ dev_vdbg(&udc->gadget.dev, "Transfer overflow\n");
status = -EPROTO;
break;
} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
- VDBG("ISO error");
+ dev_vdbg(&udc->gadget.dev, "ISO error\n");
status = -EILSEQ;
break;
} else
- ERR("Unknown error has occurred (0x%x)!\n",
+ dev_err(&udc->gadget.dev,
+ "Unknown error has occurred (0x%x)!\n",
errors);
} else if (hc32_to_cpu(curr_td->size_ioc_sts)
& DTD_STATUS_ACTIVE) {
- VDBG("Request not complete");
+ dev_vdbg(&udc->gadget.dev, "Request not complete\n");
status = REQ_UNCOMPLETE;
return status;
} else if (remaining_length) {
if (direction) {
- VDBG("Transmit dTD remaining length not zero");
+ dev_vdbg(&udc->gadget.dev,
+ "Transmit dTD remaining length not zero\n");
status = -EPROTO;
break;
} else {
break;
}
} else {
- VDBG("dTD transmitted successful");
+ dev_vdbg(&udc->gadget.dev,
+ "dTD transmitted successful\n");
}
if (j != curr_req->dtd_count - 1)
@@ -1691,7 +1699,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
/* If the ep is configured */
if (!curr_ep->ep.name) {
- WARNING("Invalid EP?");
+ dev_warn(&udc->gadget.dev, "Invalid EP?\n");
continue;
}
@@ -1700,8 +1708,9 @@ static void dtd_complete_irq(struct fsl_udc *udc)
queue) {
status = process_ep_req(udc, i, curr_req);
- VDBG("status of process_ep_req= %d, ep = %d",
- status, ep_num);
+ dev_vdbg(&udc->gadget.dev,
+ "status of process_ep_req= %d, ep = %d\n",
+ status, ep_num);
if (status == REQ_UNCOMPLETE)
break;
/* write back status to req */
@@ -1820,7 +1829,7 @@ static void reset_irq(struct fsl_udc *udc)
while (fsl_readl(&dr_regs->endpointprime)) {
/* Wait until all endptprime bits cleared */
if (time_after(jiffies, timeout)) {
- ERR("Timeout for reset\n");
+ dev_err(&udc->gadget.dev, "Timeout for reset\n");
break;
}
cpu_relax();
@@ -1830,7 +1839,7 @@ static void reset_irq(struct fsl_udc *udc)
fsl_writel(0xffffffff, &dr_regs->endptflush);
if (fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET) {
- VDBG("Bus reset");
+ dev_vdbg(&udc->gadget.dev, "Bus reset\n");
/* Bus is resetting */
udc->bus_reset = 1;
/* Reset all the queues, include XD, dTD, EP queue
@@ -1838,7 +1847,7 @@ static void reset_irq(struct fsl_udc *udc)
reset_queues(udc, true);
udc->usb_state = USB_STATE_DEFAULT;
} else {
- VDBG("Controller reset");
+ dev_vdbg(&udc->gadget.dev, "Controller reset\n");
/* initialize usb hw reg except for regs for EP, not
* touch usbintr reg */
dr_controller_setup(udc);
@@ -1872,7 +1881,7 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
/* Clear notification bits */
fsl_writel(irq_src, &dr_regs->usbsts);
- /* VDBG("irq_src [0x%8x]", irq_src); */
+ /* dev_vdbg(&udc->gadget.dev, "irq_src [0x%8x]", irq_src); */
/* Need to resume? */
if (udc->usb_state == USB_STATE_SUSPENDED)
@@ -1881,7 +1890,7 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
/* USB Interrupt */
if (irq_src & USB_STS_INT) {
- VDBG("Packet int");
+ dev_vdbg(&udc->gadget.dev, "Packet int\n");
/* Setup packet, we only support ep0 as control ep */
if (fsl_readl(&dr_regs->endptsetupstat) & EP_SETUP_STATUS_EP0) {
tripwire_handler(udc, 0,
@@ -1910,7 +1919,7 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
/* Reset Received */
if (irq_src & USB_STS_RESET) {
- VDBG("reset int");
+ dev_vdbg(&udc->gadget.dev, "reset int\n");
reset_irq(udc);
status = IRQ_HANDLED;
}
@@ -1922,7 +1931,7 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
}
if (irq_src & (USB_STS_ERR | USB_STS_SYS_ERR)) {
- VDBG("Error IRQ %x", irq_src);
+ dev_vdbg(&udc->gadget.dev, "Error IRQ %x\n", irq_src);
}
spin_unlock_irqrestore(&udc->lock, flags);
@@ -1958,7 +1967,7 @@ static int fsl_udc_start(struct usb_gadget *g,
udc_controller->transceiver->otg,
&udc_controller->gadget);
if (retval < 0) {
- ERR("can't bind to transceiver\n");
+ dev_err(&udc_controller->gadget.dev, "can't bind to transceiver\n");
udc_controller->driver = NULL;
return retval;
}
@@ -2243,7 +2252,7 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->eps = kcalloc(udc->max_ep, sizeof(struct fsl_ep), GFP_KERNEL);
if (!udc->eps) {
- ERR("kmalloc udc endpoint status failed\n");
+ dev_err(&udc->gadget.dev, "kmalloc udc endpoint status failed\n");
goto eps_alloc_failed;
}
@@ -2258,7 +2267,7 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->ep_qh = dma_alloc_coherent(&pdev->dev, size,
&udc->ep_qh_dma, GFP_KERNEL);
if (!udc->ep_qh) {
- ERR("malloc QHs for udc failed\n");
+ dev_err(&udc->gadget.dev, "malloc QHs for udc failed\n");
goto ep_queue_alloc_failed;
}
@@ -2269,14 +2278,14 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
struct fsl_req, req);
if (!udc->status_req) {
- ERR("kzalloc for udc status request failed\n");
+ dev_err(&udc->gadget.dev, "kzalloc for udc status request failed\n");
goto udc_status_alloc_failed;
}
/* allocate a small amount of memory to get valid address */
udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
if (!udc->status_req->req.buf) {
- ERR("kzalloc for udc request buffer failed\n");
+ dev_err(&udc->gadget.dev, "kzalloc for udc request buffer failed\n");
goto udc_req_buf_alloc_failed;
}
@@ -2373,7 +2382,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
if (pdata->operating_mode == FSL_USB2_DR_OTG) {
udc_controller->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(udc_controller->transceiver)) {
- ERR("Can't find OTG driver!\n");
+ dev_err(&udc_controller->gadget.dev, "Can't find OTG driver!\n");
ret = -ENODEV;
goto err_kfree;
}
@@ -2389,7 +2398,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
if (pdata->operating_mode == FSL_USB2_DR_DEVICE) {
if (!request_mem_region(res->start, resource_size(res),
driver_name)) {
- ERR("request mem region for %s failed\n", pdev->name);
+ dev_err(&udc_controller->gadget.dev, "request mem region for %s failed\n", pdev->name);
ret = -EBUSY;
goto err_kfree;
}
@@ -2420,7 +2429,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
/* Read Device Controller Capability Parameters register */
dccparams = fsl_readl(&dr_regs->dccparams);
if (!(dccparams & DCCPARAMS_DC)) {
- ERR("This SOC doesn't support device role\n");
+ dev_err(&udc_controller->gadget.dev, "This SOC doesn't support device role\n");
ret = -ENODEV;
goto err_exit;
}
@@ -2438,14 +2447,14 @@ static int fsl_udc_probe(struct platform_device *pdev)
ret = request_irq(udc_controller->irq, fsl_udc_irq, IRQF_SHARED,
driver_name, udc_controller);
if (ret != 0) {
- ERR("cannot request irq %d err %d\n",
+ dev_err(&udc_controller->gadget.dev, "cannot request irq %d err %d\n",
udc_controller->irq, ret);
goto err_exit;
}
/* Initialize the udc structure including QH member and other member */
if (struct_udc_setup(udc_controller, pdev)) {
- ERR("Can't initialize udc data structure\n");
+ dev_err(&udc_controller->gadget.dev, "Can't initialize udc data structure\n");
ret = -ENOMEM;
goto err_free_irq;
}
@@ -2486,7 +2495,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
/* setup the udc->eps[] for non-control endpoints and link
* to gadget.ep_list */
for (i = 1; i < (int)(udc_controller->max_ep / 2); i++) {
- char name[14];
+ char name[16];
sprintf(name, "ep%dout", i);
struct_ep_setup(udc_controller, i * 2, name, 1);
@@ -2666,6 +2675,15 @@ static const struct platform_device_id fsl_udc_devtype[] = {
}
};
MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
+
+static const struct of_device_id fsl_udc_dt_ids[] = {
+ { .compatible = "fsl-usb2-dr" },
+ { .compatible = "fsl-usb2-mph" },
+ { .compatible = "fsl,mpc5121-usb2-dr" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_udc_dt_ids);
+
static struct platform_driver udc_driver = {
.probe = fsl_udc_probe,
.remove_new = fsl_udc_remove,
@@ -2675,6 +2693,7 @@ static struct platform_driver udc_driver = {
.resume = fsl_udc_resume,
.driver = {
.name = driver_name,
+ .of_match_table = fsl_udc_dt_ids,
/* udc suspend/resume called from OTG driver */
.suspend = fsl_udc_otg_suspend,
.resume = fsl_udc_otg_resume,
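
One practical effect of the new of_device_id table is module autoloading: MODULE_DEVICE_TABLE(of, ...) emits the modalias strings udev matches against, and .of_match_table lets the platform core bind the driver from the firmware node's compatible. A minimal sketch of the same wiring, using a hypothetical "acme,udc" compatible rather than the Freescale ones above:

/* Sketch only: OF match table wiring for autoloading (hypothetical device). */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int acme_udc_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "bound via compatible match\n");
	return 0;
}

static const struct of_device_id acme_udc_dt_ids[] = {
	{ .compatible = "acme,udc" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, acme_udc_dt_ids);	/* exports modalias for autoload */

static struct platform_driver acme_udc_driver = {
	.probe	= acme_udc_probe,
	.driver	= {
		.name		= "acme-udc",
		.of_match_table	= acme_udc_dt_ids,
	},
};
module_platform_driver(acme_udc_driver);
MODULE_LICENSE("GPL");
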
diff --git a/drivers/usb/gadget/udc/fsl_usb2_udc.h b/drivers/usb/gadget/udc/fsl_usb2_udc.h
index 2efc5a930b48..cc1756f3e89d 100644
--- a/drivers/usb/gadget/udc/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h
@@ -508,53 +508,6 @@ struct fsl_udc {
/*-------------------------------------------------------------------------*/
-#ifdef DEBUG
-#define DBG(fmt, args...) printk(KERN_DEBUG "[%s] " fmt "\n", \
- __func__, ## args)
-#else
-#define DBG(fmt, args...) do{}while(0)
-#endif
-
-#if 0
-static void dump_msg(const char *label, const u8 * buf, unsigned int length)
-{
- unsigned int start, num, i;
- char line[52], *p;
-
- if (length >= 512)
- return;
- DBG("%s, length %u:\n", label, length);
- start = 0;
- while (length > 0) {
- num = min(length, 16u);
- p = line;
- for (i = 0; i < num; ++i) {
- if (i == 8)
- *p++ = ' ';
- sprintf(p, " %02x", buf[i]);
- p += 3;
- }
- *p = 0;
- printk(KERN_DEBUG "%6x: %s\n", start, line);
- buf += num;
- start += num;
- length -= num;
- }
-}
-#endif
-
-#ifdef VERBOSE
-#define VDBG DBG
-#else
-#define VDBG(stuff...) do{}while(0)
-#endif
-
-#define ERR(stuff...) pr_err("udc: " stuff)
-#define WARNING(stuff...) pr_warn("udc: " stuff)
-#define INFO(stuff...) pr_info("udc: " stuff)
-
-/*-------------------------------------------------------------------------*/
-
/* ### Add board specific defines here
*/
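
With the header's private DBG/VDBG/ERR/WARNING wrappers gone, messages go through the standard dev_* helpers, which prefix the owning device and hook into dynamic debug instead of relying on DEBUG/VERBOSE defines local to this driver. A small illustrative sketch (the function and messages are made up, not from the driver):

/* Sketch only: dev_* logging in place of driver-private printk wrappers. */
#include <linux/device.h>
#include <linux/types.h>

static void report_dtd_status(struct device *dev, u32 errors, int pipe)
{
	if (errors)
		dev_err(dev, "dTD error %08x QH=%d\n", errors, pipe);
	else
		/* compiled out, or a dynamic-debug site, unless VERBOSE_DEBUG is set */
		dev_vdbg(dev, "dTD completed on pipe %d\n", pipe);
}
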
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 12e76bb62c20..19bbc38f3d35 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2650,7 +2650,7 @@ net2272_plat_probe(struct platform_device *pdev)
goto err_req;
}
- ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
+ ret = net2272_probe_fin(dev, irqflags);
if (ret)
goto err_io;
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 61424cfd2e1c..1a6317e4b2a3 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -24,7 +24,6 @@
#include <linux/byteorder/generic.h>
#include <linux/platform_data/pxa2xx_udc.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
index 547af2ed9e5e..ba5a06690507 100644
--- a/drivers/usb/gadget/udc/snps_udc_plat.c
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -8,7 +8,6 @@
#include <linux/extcon.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/module.h>
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index cb85168fd00c..7aa46d426f31 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -3491,8 +3491,8 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
{
- int err = 0, usb3;
- unsigned int i;
+ int err = 0, usb3_companion_port;
+ unsigned int i, j;
xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
sizeof(*xudc->utmi_phy), GFP_KERNEL);
@@ -3520,7 +3520,7 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
if (IS_ERR(xudc->utmi_phy[i])) {
err = PTR_ERR(xudc->utmi_phy[i]);
dev_err_probe(xudc->dev, err,
- "failed to get usb2-%d PHY\n", i);
+ "failed to get PHY for phy-name usb2-%d\n", i);
goto clean_up;
} else if (xudc->utmi_phy[i]) {
/* Get usb-phy, if utmi phy is available */
@@ -3539,19 +3539,30 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
}
/* Get USB3 phy */
- usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
- if (usb3 < 0)
+ usb3_companion_port = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
+ if (usb3_companion_port < 0)
continue;
- snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
- xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
- if (IS_ERR(xudc->usb3_phy[i])) {
- err = PTR_ERR(xudc->usb3_phy[i]);
- dev_err_probe(xudc->dev, err,
- "failed to get usb3-%d PHY\n", usb3);
- goto clean_up;
- } else if (xudc->usb3_phy[i])
- dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3);
+ for (j = 0; j < xudc->soc->num_phys; j++) {
+ snprintf(phy_name, sizeof(phy_name), "usb3-%d", j);
+ xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
+ if (IS_ERR(xudc->usb3_phy[i])) {
+ err = PTR_ERR(xudc->usb3_phy[i]);
+ dev_err_probe(xudc->dev, err,
+ "failed to get PHY for phy-name usb3-%d\n", j);
+ goto clean_up;
+ } else if (xudc->usb3_phy[i]) {
+ int usb2_port =
+ tegra_xusb_padctl_get_port_number(xudc->utmi_phy[i]);
+ int usb3_port =
+ tegra_xusb_padctl_get_port_number(xudc->usb3_phy[i]);
+ if (usb3_port == usb3_companion_port) {
+ dev_dbg(xudc->dev, "USB2 port %d is paired with USB3 port %d for device mode port %d\n",
+ usb2_port, usb3_port, i);
+ break;
+ }
+ }
+ }
}
return err;
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 6c47ab0a491d..ad145a54ca74 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -66,6 +66,15 @@ struct orion_ehci_hcd {
static struct hc_driver __read_mostly ehci_orion_hc_driver;
/*
+ * Legacy DMA mask is 32 bit.
+ * AC5 has the DDR starting at 8GB, hence it requires
+ * a larger (34-bit) DMA mask, in order for DMA allocations
+ * to succeed:
+ */
+static const u64 dma_mask_orion = DMA_BIT_MASK(32);
+static const u64 dma_mask_ac5 = DMA_BIT_MASK(34);
+
+/*
* Implement Orion USB controller specification guidelines
*/
static void orion_usb_phy_v1_setup(struct usb_hcd *hcd)
@@ -211,6 +220,7 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
int irq, err;
enum orion_ehci_phy_ver phy_version;
struct orion_ehci_hcd *priv;
+ u64 *dma_mask_ptr;
if (usb_disabled())
return -ENODEV;
@@ -228,7 +238,8 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
* set. Since shared usb code relies on it, set it here for
* now. Once we have dma capability bindings this can go away.
*/
- err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ dma_mask_ptr = (u64 *)of_device_get_match_data(&pdev->dev);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, *dma_mask_ptr);
if (err)
goto err;
@@ -332,8 +343,9 @@ static void ehci_orion_drv_remove(struct platform_device *pdev)
}
static const struct of_device_id ehci_orion_dt_ids[] = {
- { .compatible = "marvell,orion-ehci", },
- { .compatible = "marvell,armada-3700-ehci", },
+ { .compatible = "marvell,orion-ehci", .data = &dma_mask_orion},
+ { .compatible = "marvell,armada-3700-ehci", .data = &dma_mask_orion},
+ { .compatible = "marvell,ac5-ehci", .data = &dma_mask_ac5},
{},
};
MODULE_DEVICE_TABLE(of, ehci_orion_dt_ids);
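
Hanging the DMA mask off the of_device_id .data pointer means probe picks 32-bit or 34-bit addressing purely from which compatible matched. A hedged sketch of the same pattern with invented compatibles:

/* Sketch only: per-compatible DMA mask selected via OF match data. */
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const u64 acme_dma_mask_32 = DMA_BIT_MASK(32);
static const u64 acme_dma_mask_34 = DMA_BIT_MASK(34);

static const struct of_device_id acme_hc_dt_ids[] = {
	{ .compatible = "acme,hc",	.data = &acme_dma_mask_32 },
	{ .compatible = "acme,hc-8gb",	.data = &acme_dma_mask_34 },
	{ }
};

static int acme_hc_probe(struct platform_device *pdev)
{
	const u64 *mask = of_device_get_match_data(&pdev->dev);

	if (!mask)
		return -EINVAL;
	/* Force both streaming and coherent masks; fails if unsupported. */
	return dma_coerce_mask_and_coherent(&pdev->dev, *mask);
}
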
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 357d9aee38a3..3348c25ddb18 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/pxa2xx_udc.h>
#include <linux/platform_device.h>
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 0956495bba57..2b871540bb50 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -585,6 +585,7 @@ done(struct sl811 *sl811, struct sl811h_ep *ep, u8 bank)
finish_request(sl811, ep, urb, urbstat);
}
+#ifdef QUIRK2
static inline u8 checkdone(struct sl811 *sl811)
{
u8 ctl;
@@ -616,6 +617,7 @@ static inline u8 checkdone(struct sl811 *sl811)
#endif
return irqstat;
}
+#endif
static irqreturn_t sl811h_irq(struct usb_hcd *hcd)
{
diff --git a/drivers/usb/host/xhci-caps.h b/drivers/usb/host/xhci-caps.h
new file mode 100644
index 000000000000..9e94cebf4a56
--- /dev/null
+++ b/drivers/usb/host/xhci-caps.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* hc_capbase bitmasks */
+/* bits 7:0 - how long is the Capabilities register */
+#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
+/* bits 31:16 */
+#define HC_VERSION(p) (((p) >> 16) & 0xffff)
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Slots */
+#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
+#define HCS_SLOTS_MASK 0xff
+/* bits 8:18, Max Interrupters */
+#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
+/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
+#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
+
+/* HCSPARAMS2 - hcs_params2 - bitmasks */
+/* bits 0:3, frames or uframes that SW needs to queue transactions
+ * ahead of the HW to meet periodic deadlines */
+#define HCS_IST(p) (((p) >> 0) & 0xf)
+/* bits 4:7, max number of Event Ring segments */
+#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
+/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
+/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
+#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
+
+/* HCSPARAMS3 - hcs_params3 - bitmasks */
+/* bits 0:7, Max U1 to U0 latency for the roothub ports */
+#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
+/* bits 16:31, Max U2 to U0 latency for the roothub ports */
+#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* true: HC can use 64-bit address pointers */
+#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
+/* true: HC can do bandwidth negotiation */
+#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
+/* true: HC uses 64-byte Device Context structures
+ * FIXME 64-byte context structures aren't supported yet.
+ */
+#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
+/* true: HC has port power switches */
+#define HCC_PPC(p) ((p) & (1 << 3))
+/* true: HC has port indicators */
+#define HCS_INDICATOR(p) ((p) & (1 << 4))
+/* true: HC has Light HC Reset Capability */
+#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
+/* true: HC supports latency tolerance messaging */
+#define HCC_LTC(p) ((p) & (1 << 6))
+/* true: no secondary Stream ID Support */
+#define HCC_NSS(p) ((p) & (1 << 7))
+/* true: HC supports Stopped - Short Packet */
+#define HCC_SPC(p) ((p) & (1 << 9))
+/* true: HC has Contiguous Frame ID Capability */
+#define HCC_CFC(p) ((p) & (1 << 11))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
+#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
+/* Extended Capabilities pointer from PCI base - section 5.3.6 */
+#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
+
+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+/* db_off bitmask - bits 0:1 reserved */
+#define DBOFF_MASK (~0x3)
+
+/* run_regs_off bitmask - bits 0:4 reserved */
+#define RTSOFF_MASK (~0x1f)
+
+/* HCCPARAMS2 - hcc_params2 - bitmasks */
+/* true: HC supports U3 entry Capability */
+#define HCC2_U3C(p) ((p) & (1 << 0))
+/* true: HC supports Configure endpoint command Max exit latency too large */
+#define HCC2_CMC(p) ((p) & (1 << 1))
+/* true: HC supports Force Save context Capability */
+#define HCC2_FSC(p) ((p) & (1 << 2))
+/* true: HC supports Compliance Transition Capability */
+#define HCC2_CTC(p) ((p) & (1 << 3))
+/* true: HC support Large ESIT payload Capability > 48k */
+#define HCC2_LEC(p) ((p) & (1 << 4))
+/* true: HC support Configuration Information Capability */
+#define HCC2_CIC(p) ((p) & (1 << 5))
+/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */
+#define HCC2_ETC(p) ((p) & (1 << 6))
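
The new xhci-caps.h only relocates these capability-register helpers out of xhci.h; the decode itself is plain bit slicing. A standalone illustration using a made-up HCSPARAMS1 value:

/* Sketch only: decoding HCSPARAMS1 the way the HCS_MAX_* macros do. */
#include <stdint.h>
#include <stdio.h>

#define HCS_MAX_SLOTS(p)	(((p) >> 0) & 0xff)
#define HCS_MAX_INTRS(p)	(((p) >> 8) & 0x7ff)
#define HCS_MAX_PORTS(p)	(((p) >> 24) & 0x7f)

int main(void)
{
	uint32_t hcs_params1 = 0x08000840;	/* invented value, not real hardware */

	/* prints: slots=64 intrs=8 ports=8 */
	printf("slots=%u intrs=%u ports=%u\n",
	       HCS_MAX_SLOTS(hcs_params1),
	       HCS_MAX_INTRS(hcs_params1),
	       HCS_MAX_PORTS(hcs_params1));
	return 0;
}
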
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index d82935d31126..8a9869ef0db6 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -634,7 +634,8 @@ static int xhci_dbc_start(struct xhci_dbc *dbc)
return ret;
}
- return mod_delayed_work(system_wq, &dbc->event_work, 1);
+ return mod_delayed_work(system_wq, &dbc->event_work,
+ msecs_to_jiffies(dbc->poll_interval));
}
static void xhci_dbc_stop(struct xhci_dbc *dbc)
@@ -899,8 +900,10 @@ static void xhci_dbc_handle_events(struct work_struct *work)
enum evtreturn evtr;
struct xhci_dbc *dbc;
unsigned long flags;
+ unsigned int poll_interval;
dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
+ poll_interval = dbc->poll_interval;
spin_lock_irqsave(&dbc->lock, flags);
evtr = xhci_dbc_do_handle_events(dbc);
@@ -916,13 +919,18 @@ static void xhci_dbc_handle_events(struct work_struct *work)
dbc->driver->disconnect(dbc);
break;
case EVT_DONE:
+ /* set fast poll rate if there are pending data transfers */
+ if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
+ !list_empty(&dbc->eps[BULK_IN].list_pending))
+ poll_interval = 1;
break;
default:
dev_info(dbc->dev, "stop handling dbc events\n");
return;
}
- mod_delayed_work(system_wq, &dbc->event_work, 1);
+ mod_delayed_work(system_wq, &dbc->event_work,
+ msecs_to_jiffies(poll_interval));
}
static const char * const dbc_state_strings[DS_MAX] = {
@@ -1175,6 +1183,7 @@ xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *
dbc->idVendor = DBC_VENDOR_ID;
dbc->bcdDevice = DBC_DEVICE_REV;
dbc->bInterfaceProtocol = DBC_PROTOCOL;
+ dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT;
if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
goto err;
diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
index e39e3ae1677a..92661b555c2a 100644
--- a/drivers/usb/host/xhci-dbgcap.h
+++ b/drivers/usb/host/xhci-dbgcap.h
@@ -94,6 +94,7 @@ struct dbc_ep {
#define DBC_QUEUE_SIZE 16
#define DBC_WRITE_BUF_SIZE 8192
+#define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */
/*
* Private structure for DbC hardware state:
@@ -140,6 +141,7 @@ struct xhci_dbc {
enum dbc_state state;
struct delayed_work event_work;
+ unsigned int poll_interval; /* ms */
unsigned resume_required:1;
struct dbc_ep eps[2];
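
The DbC event work now reschedules itself at poll_interval (64 ms by default) and only drops to a 1 ms cadence while bulk transfers are pending, instead of polling every jiffy. A sketch of that self-rescheduling delayed-work pattern with illustrative names:

/* Sketch only: delayed work that polls slowly when idle, quickly when busy. */
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define POLL_DEFAULT_MS	64

struct poller {
	struct delayed_work work;
	unsigned int poll_interval;	/* ms */
	bool busy;			/* e.g. transfers still pending */
};

static void poll_fn(struct work_struct *work)
{
	struct poller *p = container_of(to_delayed_work(work), struct poller, work);
	unsigned int interval = p->poll_interval;

	/* ... handle whatever showed up since the last pass ... */

	if (p->busy)
		interval = 1;	/* fast poll while data is in flight */

	mod_delayed_work(system_wq, &p->work, msecs_to_jiffies(interval));
}
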
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0980ade2a234..61f083de6e19 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -448,38 +448,6 @@ u32 xhci_port_state_to_neutral(u32 state)
}
EXPORT_SYMBOL_GPL(xhci_port_state_to_neutral);
-/**
- * xhci_find_slot_id_by_port() - Find slot id of a usb device on a roothub port
- * @hcd: pointer to hcd of the roothub
- * @xhci: pointer to xhci structure
- * @port: one-based port number of the port in this roothub.
- *
- * Return: Slot id of the usb device connected to the root port, 0 if not found
- */
-
-int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
- u16 port)
-{
- int slot_id;
- int i;
- enum usb_device_speed speed;
-
- slot_id = 0;
- for (i = 0; i < MAX_HC_SLOTS; i++) {
- if (!xhci->devs[i] || !xhci->devs[i]->udev)
- continue;
- speed = xhci->devs[i]->udev->speed;
- if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
- && xhci->devs[i]->fake_port == port) {
- slot_id = i;
- break;
- }
- }
-
- return slot_id;
-}
-EXPORT_SYMBOL_GPL(xhci_find_slot_id_by_port);
-
/*
* Stop device
* It issues stop endpoint command for EP 0 to 30. And wait the last command
@@ -930,7 +898,6 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
struct xhci_bus_state *bus_state;
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
- int slot_id;
u32 wIndex;
hcd = port->rhub->hcd;
@@ -986,13 +953,11 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
spin_lock_irqsave(&xhci->lock, *flags);
if (time_left) {
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- wIndex + 1);
- if (!slot_id) {
+ if (!port->slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
return -ENODEV;
}
- xhci_ring_device(xhci, slot_id);
+ xhci_ring_device(xhci, port->slot_id);
} else {
int port_status = readl(port->addr);
@@ -1202,7 +1167,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
unsigned long flags;
u32 temp, status;
int retval = 0;
- int slot_id;
struct xhci_bus_state *bus_state;
u16 link_state = 0;
u16 wake_mask = 0;
@@ -1332,15 +1296,13 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- portnum1);
- if (!slot_id) {
+ if (!port->slot_id) {
xhci_warn(xhci, "slot_id is zero\n");
goto error;
}
/* unlock to execute stop endpoint commands */
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_stop_device(xhci, slot_id, 1);
+ xhci_stop_device(xhci, port->slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port, XDEV_U3);
@@ -1463,14 +1425,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (link_state == USB_SS_PORT_LS_U3) {
int retries = 16;
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- portnum1);
- if (slot_id) {
+ if (port->slot_id) {
/* unlock to execute stop endpoint
* commands */
spin_unlock_irqrestore(&xhci->lock,
flags);
- xhci_stop_device(xhci, slot_id, 1);
+ xhci_stop_device(xhci, port->slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
}
xhci_set_link_state(xhci, port, USB_SS_PORT_LS_U3);
@@ -1584,13 +1544,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
bus_state->port_c_suspend |= 1 << wIndex;
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- portnum1);
- if (!slot_id) {
+ if (!port->slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
goto error;
}
- xhci_ring_device(xhci, slot_id);
+ xhci_ring_device(xhci, port->slot_id);
break;
case USB_PORT_FEAT_C_SUSPEND:
bus_state->port_c_suspend &= ~(1 << wIndex);
@@ -1821,10 +1779,7 @@ retry:
if (!portsc_buf[port_index])
continue;
if (test_bit(port_index, &bus_state->bus_suspended)) {
- int slot_id;
-
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- port_index + 1);
+ int slot_id = ports[port_index]->slot_id;
if (slot_id) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_stop_device(xhci, slot_id, 1);
@@ -1877,7 +1832,6 @@ int xhci_bus_resume(struct usb_hcd *hcd)
struct xhci_bus_state *bus_state;
unsigned long flags;
int max_ports, port_index;
- int slot_id;
int sret;
u32 next_state;
u32 temp, portsc;
@@ -1970,9 +1924,8 @@ int xhci_bus_resume(struct usb_hcd *hcd)
continue;
}
xhci_test_and_clear_bit(xhci, ports[port_index], PORT_PLC);
- slot_id = xhci_find_slot_id_by_port(hcd, xhci, port_index + 1);
- if (slot_id)
- xhci_ring_device(xhci, slot_id);
+ if (ports[port_index]->slot_id)
+ xhci_ring_device(xhci, ports[port_index]->slot_id);
}
(void) readl(&xhci->op_regs->command);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index a7716202a8dd..69dd86669883 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -84,7 +84,7 @@ static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment *seg;
seg = first->next;
- while (seg != first) {
+ while (seg && seg != first) {
struct xhci_segment *next = seg->next;
xhci_segment_free(xhci, seg);
seg = next;
@@ -351,17 +351,10 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
flags);
- if (!next) {
- prev = *first;
- while (prev) {
- next = prev->next;
- xhci_segment_free(xhci, prev);
- prev = next;
- }
- return -ENOMEM;
- }
- xhci_link_segments(prev, next, type, chain_links);
+ if (!next)
+ goto free_segments;
+ xhci_link_segments(prev, next, type, chain_links);
prev = next;
num++;
}
@@ -369,6 +362,10 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
*last = prev;
return 0;
+
+free_segments:
+ xhci_free_segments_for_ring(xhci, *first);
+ return -ENOMEM;
}
/*
@@ -444,19 +441,11 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
if (ret)
return -ENOMEM;
- if (ring->type == TYPE_STREAM)
+ if (ring->type == TYPE_STREAM) {
ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
ring, first, last, flags);
- if (ret) {
- struct xhci_segment *next;
- do {
- next = first->next;
- xhci_segment_free(xhci, first);
- if (first == last)
- break;
- first = next;
- } while (true);
- return ret;
+ if (ret)
+ goto free_segments;
}
xhci_link_rings(xhci, ring, first, last, num_new_segs);
@@ -466,6 +455,10 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring->num_segs);
return 0;
+
+free_segments:
+ xhci_free_segments_for_ring(xhci, first);
+ return ret;
}
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -789,15 +782,14 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
bool slot_found = false;
/* If the device never made it past the Set Address stage,
- * it may not have the real_port set correctly.
+ * it may not have the root hub port pointer set correctly.
*/
- if (virt_dev->real_port == 0 ||
- virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
- xhci_dbg(xhci, "Bad real port.\n");
+ if (!virt_dev->rhub_port) {
+ xhci_dbg(xhci, "Bad rhub port.\n");
return;
}
- tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
+ tt_list_head = &(xhci->rh_bw[virt_dev->rhub_port->hw_portnum].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* Multi-TT hubs will have more than one entry */
if (tt_info->slot_id == slot_id) {
@@ -834,7 +826,7 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
goto free_tts;
INIT_LIST_HEAD(&tt_info->tt_list);
list_add(&tt_info->tt_list,
- &xhci->rh_bw[virt_dev->real_port - 1].tts);
+ &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].tts);
tt_info->slot_id = virt_dev->udev->slot_id;
if (tt->multi)
tt_info->ttport = i+1;
@@ -908,6 +900,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
if (dev->udev && dev->udev->slot_id)
dev->udev->slot_id = 0;
+ if (dev->rhub_port && dev->rhub_port->slot_id == slot_id)
+ dev->rhub_port->slot_id = 0;
kfree(xhci->devs[slot_id]);
xhci->devs[slot_id] = NULL;
}
@@ -929,13 +923,12 @@ static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_i
if (!vdev)
return;
- if (vdev->real_port == 0 ||
- vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
- xhci_dbg(xhci, "Bad vdev->real_port.\n");
+ if (!vdev->rhub_port) {
+ xhci_dbg(xhci, "Bad rhub port.\n");
goto out;
}
- tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+ tt_list_head = &(xhci->rh_bw[vdev->rhub_port->hw_portnum].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* is this a hub device that added a tt_info to the tts list */
if (tt_info->slot_id == slot_id) {
@@ -1051,16 +1044,16 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
* The xHCI roothub may have ports of differing speeds in any order in the port
* status registers.
*
- * The xHCI hardware wants to know the roothub port number that the USB device
+ * The xHCI hardware wants to know the roothub port that the USB device
* is attached to (or the roothub port its ancestor hub is attached to). All we
* know is the index of that port under either the USB 2.0 or the USB 3.0
* roothub, but that doesn't give us the real index into the HW port status
- * registers. Call xhci_find_raw_port_number() to get real index.
+ * registers.
*/
-static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
- struct usb_device *udev)
+static struct xhci_port *xhci_find_rhub_port(struct xhci_hcd *xhci, struct usb_device *udev)
{
struct usb_device *top_dev;
+ struct xhci_hub *rhub;
struct usb_hcd *hcd;
if (udev->speed >= USB_SPEED_SUPER)
@@ -1072,7 +1065,8 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
top_dev = top_dev->parent)
/* Found device below root hub */;
- return xhci_find_raw_port_number(hcd, top_dev->portnum);
+ rhub = xhci_get_rhub(hcd);
+ return rhub->ports[top_dev->portnum - 1];
}
/* Setup an xHCI virtual device for a Set Address command */
@@ -1081,9 +1075,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep0_ctx;
struct xhci_slot_ctx *slot_ctx;
- u32 port_num;
u32 max_packets;
- struct usb_device *top_dev;
dev = xhci->devs[udev->slot_id];
/* Slot ID 0 is reserved */
@@ -1124,18 +1116,15 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
return -EINVAL;
}
/* Find the root hub port this device is under */
- port_num = xhci_find_real_port_number(xhci, udev);
- if (!port_num)
+ dev->rhub_port = xhci_find_rhub_port(xhci, udev);
+ if (!dev->rhub_port)
return -EINVAL;
- slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
- /* Set the port number in the virtual_device to the faked port number */
- for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
- top_dev = top_dev->parent)
- /* Found device below root hub */;
- dev->fake_port = top_dev->portnum;
- dev->real_port = port_num;
- xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
- xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
+ /* Slot ID is set to the device directly below the root hub */
+ if (!udev->parent->parent)
+ dev->rhub_port->slot_id = udev->slot_id;
+ slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(dev->rhub_port->hw_portnum + 1));
+ xhci_dbg(xhci, "Slot ID %d: HW portnum %d, hcd portnum %d\n",
+ udev->slot_id, dev->rhub_port->hw_portnum, dev->rhub_port->hcd_portnum);
/* Find the right bandwidth table that this device will be a part of.
* If this is a full speed device attached directly to a root port (or a
@@ -1144,12 +1133,12 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
* will never be created for the HS root hub.
*/
if (!udev->tt || !udev->tt->hub->parent) {
- dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
+ dev->bw_table = &xhci->rh_bw[dev->rhub_port->hw_portnum].bw_table;
} else {
struct xhci_root_port_bw_info *rh_bw;
struct xhci_tt_bw_info *tt_bw;
- rh_bw = &xhci->rh_bw[port_num - 1];
+ rh_bw = &xhci->rh_bw[dev->rhub_port->hw_portnum];
/* Find the right TT. */
list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
if (tt_bw->slot_id != udev->tt->hub->slot_id)
@@ -2533,7 +2522,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
if (xhci_add_interrupter(xhci, ir, 0))
goto fail;
- xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
+ ir->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
/*
* XXX: Might need to set the Interrupter Moderation Register to
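
Both the segment-allocation loop and ring expansion now unwind through a single free_segments label instead of two open-coded loops. A generic, userspace-only sketch of that cleanup idiom (not the xhci code itself):

/* Sketch only: goto-based unwind for a linked allocation loop. */
#include <stdlib.h>

struct seg { struct seg *next; };

static int alloc_chain(struct seg **first, int n)
{
	struct seg *prev = NULL;

	*first = NULL;
	while (n--) {
		struct seg *s = calloc(1, sizeof(*s));

		if (!s)
			goto free_segments;	/* one unwind path for every failure */
		if (prev)
			prev->next = s;
		else
			*first = s;
		prev = s;
	}
	return 0;

free_segments:
	while (*first) {
		struct seg *next = (*first)->next;

		free(*first);
		*first = next;
	}
	return -1;
}

int main(void)
{
	struct seg *head;

	return alloc_chain(&head, 8) ? 1 : 0;
}
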
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 61f3f8bbdcea..27eb384a3963 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -122,10 +122,6 @@ static u32 get_bw_boundary(enum usb_device_speed speed)
* each HS root port is treated as a single bandwidth domain,
* but each SS root port is treated as two bandwidth domains, one for IN eps,
* one for OUT eps.
-* @real_port value is defined as follow according to xHCI spec:
-* 1 for SSport0, ..., N+1 for SSportN, N+2 for HSport0, N+3 for HSport1, etc
-* so the bandwidth domain array is organized as follow for simplification:
-* SSport0-OUT, SSport0-IN, ..., SSportX-OUT, SSportX-IN, HSport0, ..., HSportY
*/
static struct mu3h_sch_bw_info *
get_bw_info(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
@@ -136,19 +132,19 @@ get_bw_info(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
int bw_index;
virt_dev = xhci->devs[udev->slot_id];
- if (!virt_dev->real_port) {
- WARN_ONCE(1, "%s invalid real_port\n", dev_name(&udev->dev));
+ if (!virt_dev->rhub_port) {
+ WARN_ONCE(1, "%s invalid rhub port\n", dev_name(&udev->dev));
return NULL;
}
if (udev->speed >= USB_SPEED_SUPER) {
if (usb_endpoint_dir_out(&ep->desc))
- bw_index = (virt_dev->real_port - 1) * 2;
+ bw_index = (virt_dev->rhub_port->hw_portnum) * 2;
else
- bw_index = (virt_dev->real_port - 1) * 2 + 1;
+ bw_index = (virt_dev->rhub_port->hw_portnum) * 2 + 1;
} else {
/* add one more for each SS port */
- bw_index = virt_dev->real_port + xhci->usb3_rhub.num_ports - 1;
+ bw_index = virt_dev->rhub_port->hw_portnum + xhci->usb3_rhub.num_ports;
}
return &mtk->sch_array[bw_index];
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b534ca9752be..93b697648018 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -307,8 +307,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_RESET_ON_RESUME;
}
- if (pdev->vendor == PCI_VENDOR_ID_AMD)
+ if (pdev->vendor == PCI_VENDOR_ID_AMD) {
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if (pdev->device == 0x43f7)
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+ }
if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
@@ -820,7 +823,6 @@ static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_port *port;
struct usb_device *udev;
- unsigned int slot_id;
u32 portsc;
int i;
@@ -843,15 +845,14 @@ static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
if ((portsc & PORT_PLS_MASK) != XDEV_U3)
continue;
- slot_id = xhci_find_slot_id_by_port(port->rhub->hcd, xhci,
- port->hcd_portnum + 1);
- if (!slot_id || !xhci->devs[slot_id]) {
+ if (!port->slot_id || !xhci->devs[port->slot_id]) {
xhci_err(xhci, "No dev for slot_id %d for port %d-%d in U3\n",
- slot_id, port->rhub->hcd->self.busnum, port->hcd_portnum + 1);
+ port->slot_id, port->rhub->hcd->self.busnum,
+ port->hcd_portnum + 1);
continue;
}
- udev = xhci->devs[slot_id]->udev;
+ udev = xhci->devs[port->slot_id]->udev;
/* if wakeup is enabled then don't disable the port */
if (udev->do_remote_wakeup && do_wakeup)
diff --git a/drivers/usb/host/xhci-port.h b/drivers/usb/host/xhci-port.h
new file mode 100644
index 000000000000..f19efb966d18
--- /dev/null
+++ b/drivers/usb/host/xhci-port.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
+/* true: device connected */
+#define PORT_CONNECT (1 << 0)
+/* true: port enabled */
+#define PORT_PE (1 << 1)
+/* bit 2 reserved and zeroed */
+/* true: port has an over-current condition */
+#define PORT_OC (1 << 3)
+/* true: port reset signaling asserted */
+#define PORT_RESET (1 << 4)
+/* Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe set sets the link state.
+ */
+#define PORT_PLS_MASK (0xf << 5)
+#define XDEV_U0 (0x0 << 5)
+#define XDEV_U1 (0x1 << 5)
+#define XDEV_U2 (0x2 << 5)
+#define XDEV_U3 (0x3 << 5)
+#define XDEV_DISABLED (0x4 << 5)
+#define XDEV_RXDETECT (0x5 << 5)
+#define XDEV_INACTIVE (0x6 << 5)
+#define XDEV_POLLING (0x7 << 5)
+#define XDEV_RECOVERY (0x8 << 5)
+#define XDEV_HOT_RESET (0x9 << 5)
+#define XDEV_COMP_MODE (0xa << 5)
+#define XDEV_TEST_MODE (0xb << 5)
+#define XDEV_RESUME (0xf << 5)
+
+/* true: port has power (see HCC_PPC) */
+#define PORT_POWER (1 << 9)
+/* bits 10:13 indicate device speed:
+ * 0 - undefined speed - port hasn't been initialized by a reset yet
+ * 1 - full speed
+ * 2 - low speed
+ * 3 - high speed
+ * 4 - super speed
+ * 5-15 reserved
+ */
+#define DEV_SPEED_MASK (0xf << 10)
+#define XDEV_FS (0x1 << 10)
+#define XDEV_LS (0x2 << 10)
+#define XDEV_HS (0x3 << 10)
+#define XDEV_SS (0x4 << 10)
+#define XDEV_SSP (0x5 << 10)
+#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
+#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
+#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
+#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
+#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
+#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
+
+/* Bits 20:23 in the Slot Context are the speed for the device */
+#define SLOT_SPEED_FS (XDEV_FS << 10)
+#define SLOT_SPEED_LS (XDEV_LS << 10)
+#define SLOT_SPEED_HS (XDEV_HS << 10)
+#define SLOT_SPEED_SS (XDEV_SS << 10)
+#define SLOT_SPEED_SSP (XDEV_SSP << 10)
+/* Port Indicator Control */
+#define PORT_LED_OFF (0 << 14)
+#define PORT_LED_AMBER (1 << 14)
+#define PORT_LED_GREEN (2 << 14)
+#define PORT_LED_MASK (3 << 14)
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE (1 << 16)
+/* true: connect status change */
+#define PORT_CSC (1 << 17)
+/* true: port enable change */
+#define PORT_PEC (1 << 18)
+/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
+ * into an enabled state, and the device into the default state. A "warm" reset
+ * also resets the link, forcing the device through the link training sequence.
+ * SW can also look at the Port Reset register to see when warm reset is done.
+ */
+#define PORT_WRC (1 << 19)
+/* true: over-current change */
+#define PORT_OCC (1 << 20)
+/* true: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC (1 << 21)
+/* port link status change - set on some port link state transitions:
+ * Transition Reason
+ * ------------------------------------------------------------------------------
+ * - U3 to Resume Wakeup signaling from a device
+ * - Resume to Recovery to U0 USB 3.0 device resume
+ * - Resume to U0 USB 2.0 device resume
+ * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
+ * - U3 to U0 Software resume of USB 2.0 device complete
+ * - U2 to U0 L1 resume of USB 2.1 device complete
+ * - U0 to U0 (???) L1 entry rejection by USB 2.1 device
+ * - U0 to disabled L1 entry error with USB 2.1 device
+ * - Any state to inactive Error on USB 3.0 port
+ */
+#define PORT_PLC (1 << 22)
+/* port configure error change - port failed to configure its link partner */
+#define PORT_CEC (1 << 23)
+#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+ PORT_RC | PORT_PLC | PORT_CEC)
+
+
+/* Cold Attach Status - xHC can set this bit to report device attached during
+ * Sx state. Warm port reset should be performed to clear this bit and move port
+ * to connected state.
+ */
+#define PORT_CAS (1 << 24)
+/* wake on connect (enable) */
+#define PORT_WKCONN_E (1 << 25)
+/* wake on disconnect (enable) */
+#define PORT_WKDISC_E (1 << 26)
+/* wake on over-current (enable) */
+#define PORT_WKOC_E (1 << 27)
+/* bits 28:29 reserved */
+/* true: device is non-removable - for USB 3.0 roothub emulation */
+#define PORT_DEV_REMOVE (1 << 30)
+/* Initiate a warm port reset - complete when PORT_WRC is '1' */
+#define PORT_WR (1 << 31)
+
+/* We mark duplicate entries with -1 */
+#define DUPLICATE_ENTRY ((u8)(-1))
+
+/* Port Power Management Status and Control - port_power_base bitmasks */
+/* Inactivity timer value for transitions into U1, in microseconds.
+ * Timeout can be up to 127us. 0xFF means an infinite timeout.
+ */
+#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
+#define PORT_U1_TIMEOUT_MASK 0xff
+/* Inactivity timer value for transitions into U2 */
+#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
+#define PORT_U2_TIMEOUT_MASK (0xff << 8)
+/* Bits 24:31 for port testing */
+
+/* USB2 Protocol PORTSPMSC */
+#define PORT_L1S_MASK 7
+#define PORT_L1S_SUCCESS 1
+#define PORT_RWE (1 << 3)
+#define PORT_HIRD(p) (((p) & 0xf) << 4)
+#define PORT_HIRD_MASK (0xf << 4)
+#define PORT_L1DS_MASK (0xff << 8)
+#define PORT_L1DS(p) (((p) & 0xff) << 8)
+#define PORT_HLE (1 << 16)
+#define PORT_TEST_MODE_SHIFT 28
+
+/* USB3 Protocol PORTLI Port Link Information */
+#define PORT_RX_LANES(p) (((p) >> 16) & 0xf)
+#define PORT_TX_LANES(p) (((p) >> 20) & 0xf)
+
+/* USB2 Protocol PORTHLPMC */
+#define PORT_HIRDM(p)((p) & 3)
+#define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
+#define PORT_BESLD(p)(((p) & 0xf) << 10)
+
+/* use 512 microseconds as USB2 LPM L1 default timeout. */
+#define XHCI_L1_TIMEOUT 512
+
+/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
+ * Safe to use with mixed HIRD and BESL systems (host and device) and is used
+ * by other operating systems.
+ *
+ * XHCI 1.0 errata 8/14/12 Table 13 notes:
+ * "Software should choose xHC BESL/BESLD field values that do not violate a
+ * device's resume latency requirements,
+ * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
+ * or not program values < '4' if BLC = '0' and a BESL device is attached.
+ */
+#define XHCI_DEFAULT_BESL 4
+
+/*
+ * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports
+ * to complete link training. Usually link training completes much faster,
+ * so check status 10 times with 36ms sleep in places we need to wait for
+ * polling to complete.
+ */
+#define XHCI_PORT_POLLING_LFPS_TIME 36
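
Likewise, xhci-port.h collects the PORTSC bit definitions in one place; reading a port's state is just masking. A standalone example with an invented PORTSC value:

/* Sketch only: pulling connect/enable, link state and speed out of PORTSC. */
#include <stdint.h>
#include <stdio.h>

#define PORT_CONNECT		(1 << 0)
#define PORT_PE			(1 << 1)
#define PORT_PLS_MASK		(0xf << 5)
#define XDEV_U0			(0x0 << 5)
#define DEV_PORT_SPEED(p)	(((p) >> 10) & 0x0f)

int main(void)
{
	uint32_t portsc = 0x00001203;	/* invented: connected, enabled, U0, SuperSpeed */

	/* prints: connected=1 enabled=1 in_u0=1 speed=4 */
	printf("connected=%d enabled=%d in_u0=%d speed=%u\n",
	       !!(portsc & PORT_CONNECT), !!(portsc & PORT_PE),
	       (portsc & PORT_PLS_MASK) == XDEV_U0, DEV_PORT_SPEED(portsc));
	return 0;
}
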
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 4f64b814d4aa..52278afea94b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -113,6 +113,12 @@ static bool last_td_in_urb(struct xhci_td *td)
return urb_priv->num_tds_done == urb_priv->num_tds;
}
+static bool unhandled_event_trb(struct xhci_ring *ring)
+{
+ return ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
+ ring->cycle_state);
+}
+
static void inc_td_cnt(struct urb *urb)
{
struct urb_priv *urb_priv = urb->hcpriv;
@@ -1154,6 +1160,15 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
break;
ep->ep_state &= ~EP_STOP_CMD_PENDING;
return;
+ case EP_STATE_STOPPED:
+ /*
+ * NEC uPD720200 sometimes sets this state and fails with
+ * Context Error while continuing to process TRBs.
+ * Be conservative and trust EP_CTX_STATE on other chips.
+ */
+ if (!(xhci->quirks & XHCI_NEC_HOST))
+ break;
+ fallthrough;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
@@ -1870,7 +1885,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
u32 port_id;
u32 portsc, cmd_reg;
int max_ports;
- int slot_id;
unsigned int hcd_portnum;
struct xhci_bus_state *bus_state;
bool bogus_port_status = false;
@@ -1922,9 +1936,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
if (hcd->speed >= HCD_USB3 &&
(portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
- slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
- if (slot_id && xhci->devs[slot_id])
- xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
+ if (port->slot_id && xhci->devs[port->slot_id])
+ xhci->devs[port->slot_id]->flags |= VDEV_PORT_ERROR;
}
if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
@@ -1982,9 +1995,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
* so the roothub behavior is consistent with external
* USB 3.0 hub behavior.
*/
- slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
- if (slot_id && xhci->devs[slot_id])
- xhci_ring_device(xhci, slot_id);
+ if (port->slot_id && xhci->devs[port->slot_id])
+ xhci_ring_device(xhci, port->slot_id);
if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
usb_wakeup_notification(hcd->self.root_hub,
@@ -2816,7 +2828,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_num--;
/* Is this a TRB in the currently executing TD? */
- ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
+ ep_seg = trb_in_td(xhci, td->start_seg, td->first_trb,
td->last_trb, ep_trb_dma, false);
/*
@@ -2884,9 +2896,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
"part of current TD ep_index %d "
"comp_code %u\n", ep_index,
trb_comp_code);
- trb_in_td(xhci, ep_ring->deq_seg,
- ep_ring->dequeue, td->last_trb,
- ep_trb_dma, true);
+ trb_in_td(xhci, td->start_seg, td->first_trb,
+ td->last_trb, ep_trb_dma, true);
return -ESHUTDOWN;
}
}
@@ -2962,32 +2973,18 @@ err_out:
}
/*
- * This function handles all OS-owned events on the event ring. It may drop
+ * This function handles one OS-owned event on the event ring. It may drop
* xhci->lock between event processing (e.g. to pass up port status changes).
- * Returns >0 for "possibly more events to process" (caller should call again),
- * otherwise 0 if done. In future, <0 returns should indicate error code.
*/
-static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
+ union xhci_trb *event)
{
- union xhci_trb *event;
u32 trb_type;
- /* Event ring hasn't been allocated yet. */
- if (!ir || !ir->event_ring || !ir->event_ring->dequeue) {
- xhci_err(xhci, "ERROR interrupter not ready\n");
- return -ENOMEM;
- }
-
- event = ir->event_ring->dequeue;
- /* Does the HC or OS own the TRB? */
- if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
- ir->event_ring->cycle_state)
- return 0;
-
trace_xhci_handle_event(ir->event_ring, &event->generic);
/*
- * Barrier between reading the TRB_CYCLE (valid) flag above and any
+ * Barrier between reading the TRB_CYCLE (valid) flag before, and any
* speculative reads of the event's flags/data below.
*/
rmb();
@@ -3017,18 +3014,11 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
* to make sure a watchdog timer didn't mark the host as non-responsive.
*/
if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_dbg(xhci, "xHCI host dying, returning from "
- "event handler.\n");
- return 0;
+ xhci_dbg(xhci, "xHCI host dying, returning from event handler.\n");
+ return -ENODEV;
}
- /* Update SW event ring dequeue pointer */
- inc_deq(xhci, ir->event_ring);
-
- /* Are there more items on the event ring? Caller will call us again to
- * check.
- */
- return 1;
+ return 0;
}
/*
@@ -3038,30 +3028,26 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
*/
static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
struct xhci_interrupter *ir,
- union xhci_trb *event_ring_deq,
bool clear_ehb)
{
u64 temp_64;
dma_addr_t deq;
temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- /* If necessary, update the HW's version of the event ring deq ptr. */
- if (event_ring_deq != ir->event_ring->dequeue) {
- deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
- ir->event_ring->dequeue);
- if (deq == 0)
- xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
- /*
- * Per 4.9.4, Software writes to the ERDP register shall
- * always advance the Event Ring Dequeue Pointer value.
- */
- if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK))
- return;
+ deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
+ ir->event_ring->dequeue);
+ if (deq == 0)
+ xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
+ /*
+ * Per 4.9.4, Software writes to the ERDP register shall always advance
+ * the Event Ring Dequeue Pointer value.
+ */
+ if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK) && !clear_ehb)
+ return;
- /* Update HC event ring dequeue pointer */
- temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
- temp_64 |= deq & ERST_PTR_MASK;
- }
+ /* Update HC event ring dequeue pointer */
+ temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
+ temp_64 |= deq & ERST_PTR_MASK;
/* Clear the event handler busy flag (RW1C) */
if (clear_ehb)
@@ -3069,6 +3055,76 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
}
+/* Clear the interrupt pending bit for a specific interrupter. */
+static void xhci_clear_interrupt_pending(struct xhci_hcd *xhci,
+ struct xhci_interrupter *ir)
+{
+ if (!ir->ip_autoclear) {
+ u32 irq_pending;
+
+ irq_pending = readl(&ir->ir_set->irq_pending);
+ irq_pending |= IMAN_IP;
+ writel(irq_pending, &ir->ir_set->irq_pending);
+ }
+}
+
+/*
+ * Handle all OS-owned events on an interrupter event ring. It may drop
+ * and reaquire xhci->lock between event processing.
+ */
+static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+{
+ int event_loop = 0;
+ int err;
+ u64 temp;
+
+ xhci_clear_interrupt_pending(xhci, ir);
+
+ /* Event ring hasn't been allocated yet. */
+ if (!ir->event_ring || !ir->event_ring->dequeue) {
+ xhci_err(xhci, "ERROR interrupter event ring not ready\n");
+ return -ENOMEM;
+ }
+
+ if (xhci->xhc_state & XHCI_STATE_DYING ||
+ xhci->xhc_state & XHCI_STATE_HALTED) {
+ xhci_dbg(xhci, "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\n");
+
+ /* Clear the event handler busy flag (RW1C) */
+ temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp | ERST_EHB, &ir->ir_set->erst_dequeue);
+ return -ENODEV;
+ }
+
+ /* Process all OS owned event TRBs on this event ring */
+ while (unhandled_event_trb(ir->event_ring)) {
+ err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue);
+
+ /*
+ * If half a segment of events have been handled in one go then
+ * update ERDP, and force isoc trbs to interrupt more often
+ */
+ if (event_loop++ > TRBS_PER_SEGMENT / 2) {
+ xhci_update_erst_dequeue(xhci, ir, false);
+
+ if (ir->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
+ ir->isoc_bei_interval = ir->isoc_bei_interval / 2;
+
+ event_loop = 0;
+ }
+
+ /* Update SW event ring dequeue pointer */
+ inc_deq(xhci, ir->event_ring);
+
+ if (err)
+ break;
+ }
+
+ xhci_update_erst_dequeue(xhci, ir, true);
+
+ return 0;
+}
+
/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
@@ -3077,12 +3133,8 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- union xhci_trb *event_ring_deq;
- struct xhci_interrupter *ir;
irqreturn_t ret = IRQ_NONE;
- u64 temp_64;
u32 status;
- int event_loop = 0;
spin_lock(&xhci->lock);
/* Check if the xHC generated the interrupt, or the irq is shared */
@@ -3115,50 +3167,10 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
*/
status |= STS_EINT;
writel(status, &xhci->op_regs->status);
-
- /* This is the handler of the primary interrupter */
- ir = xhci->interrupters[0];
- if (!hcd->msi_enabled) {
- u32 irq_pending;
- irq_pending = readl(&ir->ir_set->irq_pending);
- irq_pending |= IMAN_IP;
- writel(irq_pending, &ir->ir_set->irq_pending);
- }
-
- if (xhci->xhc_state & XHCI_STATE_DYING ||
- xhci->xhc_state & XHCI_STATE_HALTED) {
- xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
- "Shouldn't IRQs be disabled?\n");
- /* Clear the event handler busy flag (RW1C);
- * the event ring should be empty.
- */
- temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- xhci_write_64(xhci, temp_64 | ERST_EHB,
- &ir->ir_set->erst_dequeue);
- ret = IRQ_HANDLED;
- goto out;
- }
-
- event_ring_deq = ir->event_ring->dequeue;
- /* FIXME this should be a delayed service routine
- * that clears the EHB.
- */
- while (xhci_handle_event(xhci, ir) > 0) {
- if (event_loop++ < TRBS_PER_SEGMENT / 2)
- continue;
- xhci_update_erst_dequeue(xhci, ir, event_ring_deq, false);
- event_ring_deq = ir->event_ring->dequeue;
-
- /* ring is half-full, force isoc trbs to interrupt more often */
- if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
- xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2;
-
- event_loop = 0;
- }
-
- xhci_update_erst_dequeue(xhci, ir, event_ring_deq, true);
ret = IRQ_HANDLED;
+ /* This is the handler of the primary interrupter */
+ xhci_handle_events(xhci, xhci->interrupters[0]);
out:
spin_unlock(&xhci->lock);
@@ -4019,7 +4031,8 @@ static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
}
/* Check if we should generate event interrupt for a TD in an isoc URB */
-static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
+static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i,
+ struct xhci_interrupter *ir)
{
if (xhci->hci_version < 0x100)
return false;
@@ -4030,8 +4043,8 @@ static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
* If AVOID_BEI is set the host handles full event rings poorly,
* generate an event at least every 8th TD to clear the event ring
*/
- if (i && xhci->quirks & XHCI_AVOID_BEI)
- return !!(i % xhci->isoc_bei_interval);
+ if (i && ir->isoc_bei_interval && xhci->quirks & XHCI_AVOID_BEI)
+ return !!(i % ir->isoc_bei_interval);
return true;
}
@@ -4040,6 +4053,7 @@ static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
+ struct xhci_interrupter *ir;
struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
struct xhci_td *td;
@@ -4057,6 +4071,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
xep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+ ir = xhci->interrupters[0];
num_tds = urb->number_of_packets;
if (num_tds < 1) {
@@ -4144,7 +4159,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td->last_trb = ep_ring->enqueue;
td->last_trb_seg = ep_ring->enq_seg;
field |= TRB_IOC;
- if (trb_block_event_intr(xhci, num_tds, i))
+ if (trb_block_event_intr(xhci, num_tds, i, ir))
field |= TRB_BEI;
}
/* Calculate TRB length */
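
The interrupt path is now split into xhci_handle_events(), which loops for as long as the cycle bit marks the dequeue TRB as software-owned, and xhci_handle_event_trb(), which processes exactly one TRB. A toy, userspace-only model of that cycle-bit ownership handshake (greatly simplified; no hardware semantics implied):

/* Toy model only: cycle-bit ownership on a fixed-size event ring. */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 4

struct trb { unsigned int data; bool cycle; };

static struct trb ring[RING_SIZE];
static unsigned int enq, deq;
static bool prod_cycle = true, cons_cycle = true;

static void produce(unsigned int data)
{
	ring[enq].data = data;
	ring[enq].cycle = prod_cycle;	/* writing the cycle bit hands the TRB over */
	if (++enq == RING_SIZE) {
		enq = 0;
		prod_cycle = !prod_cycle;
	}
}

static bool unhandled_event(void)
{
	/* same idea as unhandled_event_trb(): TRB cycle matches consumer state */
	return ring[deq].cycle == cons_cycle;
}

static void consume_all(void)
{
	while (unhandled_event()) {
		printf("event %u\n", ring[deq].data);
		if (++deq == RING_SIZE) {
			deq = 0;
			cons_cycle = !cons_cycle;
		}
	}
}

int main(void)
{
	produce(1); produce(2); produce(3);
	consume_all();			/* 1 2 3 */
	produce(4); produce(5);		/* producer wraps past the ring end */
	consume_all();			/* 4 5: the toggled cycle state skips stale TRBs */
	return 0;
}
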
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index ac47b1c0544a..1740000d54c2 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -172,8 +172,8 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
__field(void *, vdev)
__field(unsigned long long, out_ctx)
__field(unsigned long long, in_ctx)
- __field(u8, fake_port)
- __field(u8, real_port)
+ __field(int, hcd_portnum)
+ __field(int, hw_portnum)
__field(u16, current_mel)
),
@@ -181,13 +181,13 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
__entry->vdev = vdev;
__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
- __entry->fake_port = (u8) vdev->fake_port;
- __entry->real_port = (u8) vdev->real_port;
+ __entry->hcd_portnum = (int) vdev->rhub_port->hcd_portnum;
+ __entry->hw_portnum = (int) vdev->rhub_port->hw_portnum;
__entry->current_mel = (u16) vdev->current_mel;
),
- TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
+ TP_printk("vdev %p ctx %llx | %llx hcd_portnum %d hw_portnum %d current_mel %d",
__entry->vdev, __entry->in_ctx, __entry->out_ctx,
- __entry->fake_port, __entry->real_port, __entry->current_mel
+ __entry->hcd_portnum, __entry->hw_portnum, __entry->current_mel
)
);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index c057c42c36f4..8579603edaff 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -346,6 +346,23 @@ static int xhci_disable_interrupter(struct xhci_interrupter *ir)
return 0;
}
+/* interrupt moderation interval imod_interval in nanoseconds */
+static int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
+ u32 imod_interval)
+{
+ u32 imod;
+
+ if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250)
+ return -EINVAL;
+
+ imod = readl(&ir->ir_set->irq_control);
+ imod &= ~ER_IRQ_INTERVAL_MASK;
+ imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
+ writel(imod, &ir->ir_set->irq_control);
+
+ return 0;
+}
+
static void compliance_mode_recovery(struct timer_list *t)
{
struct xhci_hcd *xhci;
@@ -528,7 +545,6 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
*/
int xhci_run(struct usb_hcd *hcd)
{
- u32 temp;
u64 temp_64;
int ret;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -538,6 +554,9 @@ int xhci_run(struct usb_hcd *hcd)
*/
hcd->uses_new_polling = 1;
+ if (hcd->msi_enabled)
+ ir->ip_autoclear = true;
+
if (!usb_hcd_is_primary_hcd(hcd))
return xhci_run_finished(xhci);
@@ -548,12 +567,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Set the interrupt modulation register");
- temp = readl(&ir->ir_set->irq_control);
- temp &= ~ER_IRQ_INTERVAL_MASK;
- temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
- writel(temp, &ir->ir_set->irq_control);
+ xhci_set_interrupter_moderation(ir, xhci->imod_interval);
if (xhci->quirks & XHCI_NEC_HOST) {
struct xhci_command *command;
@@ -780,19 +794,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
seg = seg->next;
} while (seg != ring->deq_seg);
- /* Reset the software enqueue and dequeue pointers */
- ring->deq_seg = ring->first_seg;
- ring->dequeue = ring->first_seg->trbs;
- ring->enq_seg = ring->deq_seg;
- ring->enqueue = ring->dequeue;
-
- ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
- /*
- * Ring is now zeroed, so the HW should look for change of ownership
- * when the cycle bit is set to 1.
- */
- ring->cycle_state = 1;
-
+ xhci_initialize_ring_info(ring, 1);
/*
* Reset the hardware dequeue pointer.
* Yes, this will need to be re-written after resume, but we're paranoid
@@ -1217,6 +1219,8 @@ static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
temp = kzalloc_node(buf_len, GFP_ATOMIC,
dev_to_node(hcd->self.sysdev));
+ if (!temp)
+ return -ENOMEM;
if (usb_urb_dir_out(urb))
sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
@@ -2259,7 +2263,7 @@ static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
struct xhci_tt_bw_info *tt_info;
/* Find the bandwidth table for the root port this TT is attached to. */
- bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
+ bw_table = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].bw_table;
tt_info = virt_dev->tt_info;
/* If this TT already had active endpoints, the bandwidth for this TT
* has already been added. Removing all periodic endpoints (and thus
@@ -2377,7 +2381,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
if (virt_dev->tt_info) {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Recalculating BW for rootport %u",
- virt_dev->real_port);
+ virt_dev->rhub_port->hw_portnum + 1);
if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
xhci_warn(xhci, "Not enough bandwidth on HS bus for "
"newly activated TT.\n");
@@ -2390,7 +2394,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
} else {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Recalculating BW for rootport %u",
- virt_dev->real_port);
+ virt_dev->rhub_port->hw_portnum + 1);
}
/* Add in how much bandwidth will be used for interval zero, or the
@@ -2487,14 +2491,12 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
bw_used += overhead + packet_size;
if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
- unsigned int port_index = virt_dev->real_port - 1;
-
/* OK, we're manipulating a HS device attached to a
* root port bandwidth domain. Include the number of active TTs
* in the bandwidth used.
*/
bw_used += TT_HS_OVERHEAD *
- xhci->rh_bw[port_index].num_active_tts;
+ xhci->rh_bw[virt_dev->rhub_port->hw_portnum].num_active_tts;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
@@ -2681,7 +2683,7 @@ void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
if (!virt_dev->tt_info)
return;
- rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
+ rh_bw_info = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum];
if (old_active_eps == 0 &&
virt_dev->tt_info->active_eps != 0) {
rh_bw_info->num_active_tts += 1;
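For reference, the new xhci_set_interrupter_moderation() helper introduced above programs the IMODI field of the interrupter's irq_control register in 250 ns units and rejects anything above U16_MAX * 250 ns. A minimal sketch of the call pattern inside xhci.c; the 5000 ns value is chosen purely for illustration and is not taken from this patch:

	/* Illustrative sketch only -- not part of the patch above. */
	/* imod_interval is in nanoseconds; the register field counts 250 ns ticks. */
	ret = xhci_set_interrupter_moderation(ir, 5000);	/* IMODI = 5000 / 250 = 20 */
	if (ret)
		xhci_warn(xhci, "invalid interrupt moderation interval\n");
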
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6f82d404883f..6f4bf98a6282 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -22,6 +22,9 @@
#include "xhci-ext-caps.h"
#include "pci-quirks.h"
+#include "xhci-port.h"
+#include "xhci-caps.h"
+
/* max buffer size for trace and debug messages */
#define XHCI_MSG_MAX 500
@@ -62,90 +65,6 @@ struct xhci_cap_regs {
/* Reserved up to (CAPLENGTH - 0x1C) */
};
-/* hc_capbase bitmasks */
-/* bits 7:0 - how long is the Capabilities register */
-#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
-/* bits 31:16 */
-#define HC_VERSION(p) (((p) >> 16) & 0xffff)
-
-/* HCSPARAMS1 - hcs_params1 - bitmasks */
-/* bits 0:7, Max Device Slots */
-#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
-#define HCS_SLOTS_MASK 0xff
-/* bits 8:18, Max Interrupters */
-#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
-/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
-#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
-
-/* HCSPARAMS2 - hcs_params2 - bitmasks */
-/* bits 0:3, frames or uframes that SW needs to queue transactions
- * ahead of the HW to meet periodic deadlines */
-#define HCS_IST(p) (((p) >> 0) & 0xf)
-/* bits 4:7, max number of Event Ring segments */
-#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
-/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
-/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
-/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
-#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
-
-/* HCSPARAMS3 - hcs_params3 - bitmasks */
-/* bits 0:7, Max U1 to U0 latency for the roothub ports */
-#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
-/* bits 16:31, Max U2 to U0 latency for the roothub ports */
-#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
-
-/* HCCPARAMS - hcc_params - bitmasks */
-/* true: HC can use 64-bit address pointers */
-#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
-/* true: HC can do bandwidth negotiation */
-#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
-/* true: HC uses 64-byte Device Context structures
- * FIXME 64-byte context structures aren't supported yet.
- */
-#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
-/* true: HC has port power switches */
-#define HCC_PPC(p) ((p) & (1 << 3))
-/* true: HC has port indicators */
-#define HCS_INDICATOR(p) ((p) & (1 << 4))
-/* true: HC has Light HC Reset Capability */
-#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
-/* true: HC supports latency tolerance messaging */
-#define HCC_LTC(p) ((p) & (1 << 6))
-/* true: no secondary Stream ID Support */
-#define HCC_NSS(p) ((p) & (1 << 7))
-/* true: HC supports Stopped - Short Packet */
-#define HCC_SPC(p) ((p) & (1 << 9))
-/* true: HC has Contiguous Frame ID Capability */
-#define HCC_CFC(p) ((p) & (1 << 11))
-/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
-#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
-/* Extended Capabilities pointer from PCI base - section 5.3.6 */
-#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
-
-#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
-
-/* db_off bitmask - bits 0:1 reserved */
-#define DBOFF_MASK (~0x3)
-
-/* run_regs_off bitmask - bits 0:4 reserved */
-#define RTSOFF_MASK (~0x1f)
-
-/* HCCPARAMS2 - hcc_params2 - bitmasks */
-/* true: HC supports U3 entry Capability */
-#define HCC2_U3C(p) ((p) & (1 << 0))
-/* true: HC supports Configure endpoint command Max exit latency too large */
-#define HCC2_CMC(p) ((p) & (1 << 1))
-/* true: HC supports Force Save context Capability */
-#define HCC2_FSC(p) ((p) & (1 << 2))
-/* true: HC supports Compliance Transition Capability */
-#define HCC2_CTC(p) ((p) & (1 << 3))
-/* true: HC support Large ESIT payload Capability > 48k */
-#define HCC2_LEC(p) ((p) & (1 << 4))
-/* true: HC support Configuration Information Capability */
-#define HCC2_CIC(p) ((p) & (1 << 5))
-/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */
-#define HCC2_ETC(p) ((p) & (1 << 6))
-
/* Number of registers per port */
#define NUM_PORT_REGS 4
@@ -291,181 +210,6 @@ struct xhci_op_regs {
#define CONFIG_CIE (1 << 9)
/* bits 10:31 - reserved and should be preserved */
-/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
-/* true: device connected */
-#define PORT_CONNECT (1 << 0)
-/* true: port enabled */
-#define PORT_PE (1 << 1)
-/* bit 2 reserved and zeroed */
-/* true: port has an over-current condition */
-#define PORT_OC (1 << 3)
-/* true: port reset signaling asserted */
-#define PORT_RESET (1 << 4)
-/* Port Link State - bits 5:8
- * A read gives the current link PM state of the port,
- * a write with Link State Write Strobe set sets the link state.
- */
-#define PORT_PLS_MASK (0xf << 5)
-#define XDEV_U0 (0x0 << 5)
-#define XDEV_U1 (0x1 << 5)
-#define XDEV_U2 (0x2 << 5)
-#define XDEV_U3 (0x3 << 5)
-#define XDEV_DISABLED (0x4 << 5)
-#define XDEV_RXDETECT (0x5 << 5)
-#define XDEV_INACTIVE (0x6 << 5)
-#define XDEV_POLLING (0x7 << 5)
-#define XDEV_RECOVERY (0x8 << 5)
-#define XDEV_HOT_RESET (0x9 << 5)
-#define XDEV_COMP_MODE (0xa << 5)
-#define XDEV_TEST_MODE (0xb << 5)
-#define XDEV_RESUME (0xf << 5)
-
-/* true: port has power (see HCC_PPC) */
-#define PORT_POWER (1 << 9)
-/* bits 10:13 indicate device speed:
- * 0 - undefined speed - port hasn't be initialized by a reset yet
- * 1 - full speed
- * 2 - low speed
- * 3 - high speed
- * 4 - super speed
- * 5-15 reserved
- */
-#define DEV_SPEED_MASK (0xf << 10)
-#define XDEV_FS (0x1 << 10)
-#define XDEV_LS (0x2 << 10)
-#define XDEV_HS (0x3 << 10)
-#define XDEV_SS (0x4 << 10)
-#define XDEV_SSP (0x5 << 10)
-#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
-#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
-#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
-#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
-#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
-#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
-#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
-#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
-
-/* Bits 20:23 in the Slot Context are the speed for the device */
-#define SLOT_SPEED_FS (XDEV_FS << 10)
-#define SLOT_SPEED_LS (XDEV_LS << 10)
-#define SLOT_SPEED_HS (XDEV_HS << 10)
-#define SLOT_SPEED_SS (XDEV_SS << 10)
-#define SLOT_SPEED_SSP (XDEV_SSP << 10)
-/* Port Indicator Control */
-#define PORT_LED_OFF (0 << 14)
-#define PORT_LED_AMBER (1 << 14)
-#define PORT_LED_GREEN (2 << 14)
-#define PORT_LED_MASK (3 << 14)
-/* Port Link State Write Strobe - set this when changing link state */
-#define PORT_LINK_STROBE (1 << 16)
-/* true: connect status change */
-#define PORT_CSC (1 << 17)
-/* true: port enable change */
-#define PORT_PEC (1 << 18)
-/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
- * into an enabled state, and the device into the default state. A "warm" reset
- * also resets the link, forcing the device through the link training sequence.
- * SW can also look at the Port Reset register to see when warm reset is done.
- */
-#define PORT_WRC (1 << 19)
-/* true: over-current change */
-#define PORT_OCC (1 << 20)
-/* true: reset change - 1 to 0 transition of PORT_RESET */
-#define PORT_RC (1 << 21)
-/* port link status change - set on some port link state transitions:
- * Transition Reason
- * ------------------------------------------------------------------------------
- * - U3 to Resume Wakeup signaling from a device
- * - Resume to Recovery to U0 USB 3.0 device resume
- * - Resume to U0 USB 2.0 device resume
- * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
- * - U3 to U0 Software resume of USB 2.0 device complete
- * - U2 to U0 L1 resume of USB 2.1 device complete
- * - U0 to U0 (???) L1 entry rejection by USB 2.1 device
- * - U0 to disabled L1 entry error with USB 2.1 device
- * - Any state to inactive Error on USB 3.0 port
- */
-#define PORT_PLC (1 << 22)
-/* port configure error change - port failed to configure its link partner */
-#define PORT_CEC (1 << 23)
-#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
- PORT_RC | PORT_PLC | PORT_CEC)
-
-
-/* Cold Attach Status - xHC can set this bit to report device attached during
- * Sx state. Warm port reset should be perfomed to clear this bit and move port
- * to connected state.
- */
-#define PORT_CAS (1 << 24)
-/* wake on connect (enable) */
-#define PORT_WKCONN_E (1 << 25)
-/* wake on disconnect (enable) */
-#define PORT_WKDISC_E (1 << 26)
-/* wake on over-current (enable) */
-#define PORT_WKOC_E (1 << 27)
-/* bits 28:29 reserved */
-/* true: device is non-removable - for USB 3.0 roothub emulation */
-#define PORT_DEV_REMOVE (1 << 30)
-/* Initiate a warm port reset - complete when PORT_WRC is '1' */
-#define PORT_WR (1 << 31)
-
-/* We mark duplicate entries with -1 */
-#define DUPLICATE_ENTRY ((u8)(-1))
-
-/* Port Power Management Status and Control - port_power_base bitmasks */
-/* Inactivity timer value for transitions into U1, in microseconds.
- * Timeout can be up to 127us. 0xFF means an infinite timeout.
- */
-#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
-#define PORT_U1_TIMEOUT_MASK 0xff
-/* Inactivity timer value for transitions into U2 */
-#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
-#define PORT_U2_TIMEOUT_MASK (0xff << 8)
-/* Bits 24:31 for port testing */
-
-/* USB2 Protocol PORTSPMSC */
-#define PORT_L1S_MASK 7
-#define PORT_L1S_SUCCESS 1
-#define PORT_RWE (1 << 3)
-#define PORT_HIRD(p) (((p) & 0xf) << 4)
-#define PORT_HIRD_MASK (0xf << 4)
-#define PORT_L1DS_MASK (0xff << 8)
-#define PORT_L1DS(p) (((p) & 0xff) << 8)
-#define PORT_HLE (1 << 16)
-#define PORT_TEST_MODE_SHIFT 28
-
-/* USB3 Protocol PORTLI Port Link Information */
-#define PORT_RX_LANES(p) (((p) >> 16) & 0xf)
-#define PORT_TX_LANES(p) (((p) >> 20) & 0xf)
-
-/* USB2 Protocol PORTHLPMC */
-#define PORT_HIRDM(p)((p) & 3)
-#define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
-#define PORT_BESLD(p)(((p) & 0xf) << 10)
-
-/* use 512 microseconds as USB2 LPM L1 default timeout. */
-#define XHCI_L1_TIMEOUT 512
-
-/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
- * Safe to use with mixed HIRD and BESL systems (host and device) and is used
- * by other operating systems.
- *
- * XHCI 1.0 errata 8/14/12 Table 13 notes:
- * "Software should choose xHC BESL/BESLD field values that do not violate a
- * device's resume latency requirements,
- * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
- * or not program values < '4' if BLC = '0' and a BESL device is attached.
- */
-#define XHCI_DEFAULT_BESL 4
-
-/*
- * USB3 specification define a 360ms tPollingLFPSTiemout for USB3 ports
- * to complete link training. usually link trainig completes much faster
- * so check status 10 times with 36ms sleep in places we need to wait for
- * polling to complete.
- */
-#define XHCI_PORT_POLLING_LFPS_TIME 36
-
/**
* struct xhci_intr_reg - Interrupt Register Set
* @irq_pending: IMAN - Interrupt Management Register. Used to enable
@@ -995,8 +739,7 @@ struct xhci_virt_device {
/* Used for addressing devices and configuration changes */
struct xhci_container_ctx *in_ctx;
struct xhci_virt_ep eps[EP_CTX_PER_DEV];
- u8 fake_port;
- u8 real_port;
+ struct xhci_port *rhub_port;
struct xhci_interval_bw_table *bw_table;
struct xhci_tt_bw_info *tt_info;
/*
@@ -1688,6 +1431,8 @@ struct xhci_interrupter {
struct xhci_erst erst;
struct xhci_intr_reg __iomem *ir_set;
unsigned int intr_num;
+ bool ip_autoclear;
+ u32 isoc_bei_interval;
/* For interrupter registers save and restore over suspend/resume */
u32 s3_irq_pending;
u32 s3_irq_control;
@@ -1717,6 +1462,8 @@ struct xhci_port {
unsigned int lpm_incapable:1;
unsigned long resume_timestamp;
bool rexit_active;
+ /* Slot ID is the index of the device directly connected to the port */
+ int slot_id;
struct completion rexit_done;
struct completion u3exit_done;
};
@@ -1760,7 +1507,6 @@ struct xhci_hcd {
u8 isoc_threshold;
/* imod_interval in ns (I * 250ns) */
u32 imod_interval;
- u32 isoc_bei_interval;
int event_ring_max;
/* 4KB min, 128MB max */
int page_size;
@@ -2200,8 +1946,6 @@ unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd);
#endif /* CONFIG_PM */
u32 xhci_port_state_to_neutral(u32 state);
-int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
- u16 port);
void xhci_ring_device(struct xhci_hcd *xhci, int slot_id);
/* xHCI contexts */
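One detail worth spelling out from the hunks above: the removed real_port field was 1-based, while the rhub_port->hw_portnum that replaces it is a 0-based index, which is why the bandwidth-table lookups drop the "- 1" and the debug print adds "+ 1". Restated as a before/after sketch (illustrative only):

	/* Illustrative sketch only -- not part of the patch above. */
	/* old, 1-based field:  bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; */
	/* new, 0-based index taken straight from the root-hub port: */
	bw_table = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].bw_table;
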
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index 67f098579fb4..7b7e1554ea20 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -631,7 +631,6 @@ static int mdc800_device_open (struct inode* inode, struct file *file)
mdc800->camera_busy=0;
mdc800->camera_request_ready=0;
- retval=0;
mdc800->irq_urb->dev = mdc800->dev;
retval = usb_submit_urb (mdc800->irq_urb, GFP_KERNEL);
if (retval) {
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index 0dd2b032c90b..c6101ed2d9d4 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -260,7 +260,7 @@ static int onboard_hub_probe(struct platform_device *pdev)
if (!hub)
return -ENOMEM;
- hub->pdata = device_get_match_data(&pdev->dev);
+ hub->pdata = device_get_match_data(dev);
if (!hub->pdata)
return -EINVAL;
@@ -454,6 +454,8 @@ static const struct usb_device_id onboard_hub_id_table[] = {
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5414) }, /* RTS5414 USB 2.1 */
+ { USB_DEVICE(VENDOR_ID_TI, 0x8025) }, /* TI USB8020B 3.0 */
+ { USB_DEVICE(VENDOR_ID_TI, 0x8027) }, /* TI USB8020B 2.0 */
{ USB_DEVICE(VENDOR_ID_TI, 0x8140) }, /* TI USB8041 3.0 */
{ USB_DEVICE(VENDOR_ID_TI, 0x8142) }, /* TI USB8041 2.0 */
{ USB_DEVICE(VENDOR_ID_VIA, 0x0817) }, /* VIA VL817 3.1 */
diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
index f360d5cf8d8a..b4b15d45f84d 100644
--- a/drivers/usb/misc/onboard_usb_hub.h
+++ b/drivers/usb/misc/onboard_usb_hub.h
@@ -26,6 +26,11 @@ static const struct onboard_hub_pdata realtek_rts5411_data = {
.num_supplies = 1,
};
+static const struct onboard_hub_pdata ti_tusb8020b_data = {
+ .reset_us = 3000,
+ .num_supplies = 1,
+};
+
static const struct onboard_hub_pdata ti_tusb8041_data = {
.reset_us = 3000,
.num_supplies = 1,
@@ -62,6 +67,8 @@ static const struct of_device_id onboard_hub_match[] = {
{ .compatible = "usb424,2517", .data = &microchip_usb424_data, },
{ .compatible = "usb424,2744", .data = &microchip_usb5744_data, },
{ .compatible = "usb424,5744", .data = &microchip_usb5744_data, },
+ { .compatible = "usb451,8025", .data = &ti_tusb8020b_data, },
+ { .compatible = "usb451,8027", .data = &ti_tusb8020b_data, },
{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
{ .compatible = "usb4b4,6504", .data = &cypress_hx3_data, },
diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c
index 9f2be22af844..7c657ea2dabd 100644
--- a/drivers/usb/mtu3/mtu3_host.c
+++ b/drivers/usb/mtu3/mtu3_host.c
@@ -34,6 +34,18 @@
#define WC0_SSUSB0_CDEN BIT(6)
#define WC0_IS_SPM_EN BIT(1)
+/* mt8195 */
+#define PERI_WK_CTRL0_8195 0x04
+#define WC0_IS_P_95 BIT(30) /* polarity */
+#define WC0_IS_C_95(x) ((u32)(((x) & 0x7) << 27))
+#define WC0_IS_EN_P3_95 BIT(26)
+#define WC0_IS_EN_P2_95 BIT(25)
+
+#define PERI_WK_CTRL1_8195 0x20
+#define WC1_IS_C_95(x) ((u32)(((x) & 0xf) << 28))
+#define WC1_IS_P_95 BIT(12)
+#define WC1_IS_EN_P0_95 BIT(6)
+
/* mt2712 etc */
#define PERI_SSUSB_SPM_CTRL 0x0
#define SSC_IP_SLEEP_EN BIT(4)
@@ -44,6 +56,9 @@ enum ssusb_uwk_vers {
SSUSB_UWK_V2,
SSUSB_UWK_V1_1 = 101, /* specific revision 1.01 */
SSUSB_UWK_V1_2, /* specific revision 1.02 */
+ SSUSB_UWK_V1_3, /* mt8195 IP0 */
+ SSUSB_UWK_V1_5 = 105, /* mt8195 IP2 */
+ SSUSB_UWK_V1_6, /* mt8195 IP3 */
};
/*
@@ -70,6 +85,21 @@ static void ssusb_wakeup_ip_sleep_set(struct ssusb_mtk *ssusb, bool enable)
msk = WC0_SSUSB0_CDEN | WC0_IS_SPM_EN;
val = enable ? msk : 0;
break;
+ case SSUSB_UWK_V1_3:
+ reg = ssusb->uwk_reg_base + PERI_WK_CTRL1_8195;
+ msk = WC1_IS_EN_P0_95 | WC1_IS_C_95(0xf) | WC1_IS_P_95;
+ val = enable ? (WC1_IS_EN_P0_95 | WC1_IS_C_95(0x1)) : 0;
+ break;
+ case SSUSB_UWK_V1_5:
+ reg = ssusb->uwk_reg_base + PERI_WK_CTRL0_8195;
+ msk = WC0_IS_EN_P2_95 | WC0_IS_C_95(0x7) | WC0_IS_P_95;
+ val = enable ? (WC0_IS_EN_P2_95 | WC0_IS_C_95(0x1)) : 0;
+ break;
+ case SSUSB_UWK_V1_6:
+ reg = ssusb->uwk_reg_base + PERI_WK_CTRL0_8195;
+ msk = WC0_IS_EN_P3_95 | WC0_IS_C_95(0x7) | WC0_IS_P_95;
+ val = enable ? (WC0_IS_EN_P3_95 | WC0_IS_C_95(0x1)) : 0;
+ break;
case SSUSB_UWK_V2:
reg = ssusb->uwk_reg_base + PERI_SSUSB_SPM_CTRL;
msk = SSC_IP_SLEEP_EN | SSC_SPM_INT_EN;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 051c6da7cf6d..55df0ee413d8 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1744,7 +1744,6 @@ static inline void musb_g_init_endpoints(struct musb *musb)
{
u8 epnum;
struct musb_hw_ep *hw_ep;
- unsigned count = 0;
/* initialize endpoint list just once */
INIT_LIST_HEAD(&(musb->g.ep_list));
@@ -1754,17 +1753,14 @@ static inline void musb_g_init_endpoints(struct musb *musb)
epnum++, hw_ep++) {
if (hw_ep->is_shared_fifo /* || !epnum */) {
init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
- count++;
} else {
if (hw_ep->max_packet_sz_tx) {
init_peripheral_ep(musb, &hw_ep->ep_in,
epnum, 1);
- count++;
}
if (hw_ep->max_packet_sz_rx) {
init_peripheral_ep(musb, &hw_ep->ep_out,
epnum, 0);
- count++;
}
}
}
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 9ab50f26db60..8f735a86cd19 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -74,33 +74,26 @@ static void nop_reset(struct usb_phy_generic *nop)
}
/* interface to regulator framework */
-static void nop_set_vbus_draw(struct usb_phy_generic *nop, unsigned mA)
+static int nop_set_vbus(struct usb_otg *otg, bool enable)
{
- struct regulator *vbus_draw = nop->vbus_draw;
- int enabled;
- int ret;
+ int ret = 0;
+ struct usb_phy_generic *nop = dev_get_drvdata(otg->usb_phy->dev);
- if (!vbus_draw)
- return;
+ if (!nop->vbus_draw)
+ return 0;
- enabled = nop->vbus_draw_enabled;
- if (mA) {
- regulator_set_current_limit(vbus_draw, 0, 1000 * mA);
- if (!enabled) {
- ret = regulator_enable(vbus_draw);
- if (ret < 0)
- return;
- nop->vbus_draw_enabled = 1;
- }
- } else {
- if (enabled) {
- ret = regulator_disable(vbus_draw);
- if (ret < 0)
- return;
- nop->vbus_draw_enabled = 0;
- }
+ if (enable && !nop->vbus_draw_enabled) {
+ ret = regulator_enable(nop->vbus_draw);
+ if (ret)
+ nop->vbus_draw_enabled = false;
+ else
+ nop->vbus_draw_enabled = true;
+
+ } else if (!enable && nop->vbus_draw_enabled) {
+ ret = regulator_disable(nop->vbus_draw);
+ nop->vbus_draw_enabled = false;
}
- nop->mA = mA;
+ return ret;
}
@@ -120,14 +113,9 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
otg->state = OTG_STATE_B_PERIPHERAL;
nop->phy.last_event = status;
- /* drawing a "unit load" is *always* OK, except for OTG */
- nop_set_vbus_draw(nop, 100);
-
atomic_notifier_call_chain(&nop->phy.notifier, status,
otg->gadget);
} else {
- nop_set_vbus_draw(nop, 0);
-
status = USB_EVENT_NONE;
otg->state = OTG_STATE_B_IDLE;
nop->phy.last_event = status;
@@ -274,6 +262,13 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),
"could not get vbus regulator\n");
+ nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");
+ if (PTR_ERR(nop->vbus_draw) == -ENODEV)
+ nop->vbus_draw = NULL;
+ if (IS_ERR(nop->vbus_draw))
+ return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),
+ "could not get vbus regulator\n");
+
nop->dev = dev;
nop->phy.dev = nop->dev;
nop->phy.label = "nop-xceiv";
@@ -284,6 +279,7 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
nop->phy.otg->usb_phy = &nop->phy;
nop->phy.otg->set_host = nop_set_host;
nop->phy.otg->set_peripheral = nop_set_peripheral;
+ nop->phy.otg->set_vbus = nop_set_vbus;
return 0;
}
@@ -341,6 +337,9 @@ static void usb_phy_generic_remove(struct platform_device *pdev)
struct usb_phy_generic *nop = platform_get_drvdata(pdev);
usb_remove_phy(&nop->phy);
+
+ if (nop->vbus_draw && nop->vbus_draw_enabled)
+ regulator_disable(nop->vbus_draw);
}
static const struct of_device_id nop_xceiv_dt_ids[] = {
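With the hunks above, the generic NOP PHY exposes VBUS control through the standard otg->set_vbus() hook instead of the old internal nop_set_vbus_draw(mA) path. A hedged sketch of how a controller driver could drive it; the phandle name and error handling are assumptions for illustration, not something this patch adds:

	/* Illustrative sketch only -- not part of the patch above. */
	struct usb_phy *phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);

	if (!IS_ERR(phy) && phy->otg && phy->otg->set_vbus) {
		int err = phy->otg->set_vbus(phy->otg, true);	/* enables the vbus regulator */

		if (err)
			dev_warn(dev, "failed to enable VBUS: %d\n", err);
	}
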
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 4b468bde19cf..06e0fb23566c 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -699,7 +699,7 @@ out:
}
EXPORT_SYMBOL_GPL(usb_add_phy);
-static struct device_type usb_phy_dev_type = {
+static const struct device_type usb_phy_dev_type = {
.name = "usb_phy",
.uevent = usb_phy_uevent,
};
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 70165dd86b5d..d7aa913ceb8a 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -7,6 +7,7 @@
* Hans de Goede <hdegoede@redhat.com>
*/
+#include <linux/component.h>
#include <linux/usb/role.h>
#include <linux/property.h>
#include <linux/device.h>
@@ -36,6 +37,32 @@ struct usb_role_switch {
#define to_role_switch(d) container_of(d, struct usb_role_switch, dev)
+static int connector_bind(struct device *dev, struct device *connector, void *data)
+{
+ int ret;
+
+ ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector");
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_link(&connector->kobj, &dev->kobj, "usb-role-switch");
+ if (ret)
+ sysfs_remove_link(&dev->kobj, "connector");
+
+ return ret;
+}
+
+static void connector_unbind(struct device *dev, struct device *connector, void *data)
+{
+ sysfs_remove_link(&connector->kobj, "usb-role-switch");
+ sysfs_remove_link(&dev->kobj, "connector");
+}
+
+static const struct component_ops connector_ops = {
+ .bind = connector_bind,
+ .unbind = connector_unbind,
+};
+
/**
* usb_role_switch_set_role - Set USB role for a switch
* @sw: USB role switch
@@ -361,6 +388,12 @@ usb_role_switch_register(struct device *parent,
return ERR_PTR(ret);
}
+ if (dev_fwnode(&sw->dev)) {
+ ret = component_add(&sw->dev, &connector_ops);
+ if (ret)
+ dev_warn(&sw->dev, "failed to add component\n");
+ }
+
sw->registered = true;
/* TODO: Symlinks for the host port and the device controller. */
@@ -377,10 +410,12 @@ EXPORT_SYMBOL_GPL(usb_role_switch_register);
*/
void usb_role_switch_unregister(struct usb_role_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw)) {
- sw->registered = false;
- device_unregister(&sw->dev);
- }
+ if (IS_ERR_OR_NULL(sw))
+ return;
+ sw->registered = false;
+ if (dev_fwnode(&sw->dev))
+ component_del(&sw->dev, &connector_ops);
+ device_unregister(&sw->dev);
}
EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 923e0ed85444..21fd26609252 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -56,6 +56,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x04BF, 0x1301) }, /* TDK Corporation NC0110013M - Network Controller */
+ { USB_DEVICE(0x04BF, 0x1303) }, /* TDK Corporation MM0110113M - i3 Micro Module */
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
@@ -144,6 +146,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+ { USB_DEVICE(0x10C4, 0x863C) }, /* MGP Instruments PDS100 */
{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
{ USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */
@@ -177,6 +180,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
+ { USB_DEVICE(0x11CA, 0x0212) }, /* Verifone USB to Printer (UART, CP2102) */
{ USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 13a56783830d..76a04ab41100 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1077,6 +1077,8 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ /* GMC devices */
+ { USB_DEVICE(GMC_VID, GMC_Z216C_PID) },
{ } /* Terminating entry */
};
@@ -2610,7 +2612,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
struct device *ddev = &port->dev;
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct ktermios *termios = &tty->termios;
- unsigned int cflag = termios->c_cflag;
+ unsigned int cflag;
u16 value, index;
int ret;
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 21a2b5a25fc0..5ee60ba2a73c 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1606,3 +1606,9 @@
#define UBLOX_VID 0x1546
#define UBLOX_C099F9P_ZED_PID 0x0502
#define UBLOX_C099F9P_ODIN_PID 0x0503
+
+/*
+ * GMC devices
+ */
+#define GMC_VID 0x1cd7
+#define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 93b17e0e05a3..0a783985197c 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -921,7 +921,6 @@ static void usa28_indat_callback(struct urb *urb)
port = urb->context;
p_priv = usb_get_serial_port_data(port);
- data = urb->transfer_buffer;
if (urb != p_priv->in_urbs[p_priv->in_flip])
return;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2ae124c49d44..55a65d941ccb 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -613,6 +613,11 @@ static void option_instat_callback(struct urb *urb);
/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
#define LUAT_PRODUCT_AIR720U 0x4e00
+/* MeiG Smart Technology products */
+#define MEIGSMART_VENDOR_ID 0x2dee
+/* MeiG Smart SLM320 based on UNISOC UIS8910 */
+#define MEIGSMART_PRODUCT_SLM320 0x4d41
+
/* Device flags */
/* Highest interface number which can be used with NCTRL() and RSVD() */
@@ -2282,6 +2287,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 6365cfe5402c..fa07f6ff9ecc 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -409,7 +409,6 @@ static void oti6858_set_termios(struct tty_struct *tty,
cflag = tty->termios.c_cflag;
spin_lock_irqsave(&priv->lock, flags);
- divisor = priv->pending_setup.divisor;
frame_fmt = priv->pending_setup.frame_fmt;
control = priv->pending_setup.control;
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index 2b098b55c4cb..c3ce51c2dabd 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -534,7 +534,6 @@ static void pdump(struct us_data *us, void *ibuffer, int length)
}
line[offset] = 0;
usb_stor_dbg(us, "%s\n", line);
- offset = 0;
}
#endif
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index 15dc25801cdc..0aa079405d23 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -196,7 +196,7 @@ static int sddr55_read_data(struct us_data *us,
unsigned char *buffer;
unsigned int pba;
- unsigned long address;
+ unsigned int address;
unsigned short pages;
unsigned int len, offset;
@@ -316,7 +316,7 @@ static int sddr55_write_data(struct us_data *us,
unsigned int pba;
unsigned int new_pba;
- unsigned long address;
+ unsigned int address;
unsigned short pages;
int i;
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index f8ea3054be54..038dc51f429d 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -50,13 +50,17 @@ enum {
enum dp_state {
DP_STATE_IDLE,
DP_STATE_ENTER,
+ DP_STATE_ENTER_PRIME,
DP_STATE_UPDATE,
DP_STATE_CONFIGURE,
+ DP_STATE_CONFIGURE_PRIME,
DP_STATE_EXIT,
+ DP_STATE_EXIT_PRIME,
};
struct dp_altmode {
struct typec_displayport_data data;
+ struct typec_displayport_data data_prime;
enum dp_state state;
bool hpd;
@@ -67,6 +71,7 @@ struct dp_altmode {
struct typec_altmode *alt;
const struct typec_altmode *port;
struct fwnode_handle *connector_fwnode;
+ struct typec_altmode *plug_prime;
};
static int dp_altmode_notify(struct dp_altmode *dp)
@@ -99,12 +104,18 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
conf |= DP_CONF_UFP_U_AS_DFP_D;
pin_assign = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo) &
DP_CAP_DFP_D_PIN_ASSIGN(dp->port->vdo);
+ /* Account for active cable capabilities */
+ if (dp->plug_prime)
+ pin_assign &= DP_CAP_DFP_D_PIN_ASSIGN(dp->plug_prime->vdo);
break;
case DP_STATUS_CON_UFP_D:
case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */
conf |= DP_CONF_UFP_U_AS_UFP_D;
pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) &
DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);
+ /* Account for active cable capabilities */
+ if (dp->plug_prime)
+ pin_assign &= DP_CAP_UFP_D_PIN_ASSIGN(dp->plug_prime->vdo);
break;
default:
break;
@@ -130,6 +141,8 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
}
dp->data.conf = conf;
+ if (dp->plug_prime)
+ dp->data_prime.conf = conf;
return 0;
}
@@ -143,13 +156,16 @@ static int dp_altmode_status_update(struct dp_altmode *dp)
if (configured && (dp->data.status & DP_STATUS_SWITCH_TO_USB)) {
dp->data.conf = 0;
- dp->state = DP_STATE_CONFIGURE;
+ dp->data_prime.conf = 0;
+ dp->state = dp->plug_prime ? DP_STATE_CONFIGURE_PRIME :
+ DP_STATE_CONFIGURE;
} else if (dp->data.status & DP_STATUS_EXIT_DP_MODE) {
dp->state = DP_STATE_EXIT;
} else if (!(con & DP_CONF_CURRENTLY(dp->data.conf))) {
ret = dp_altmode_configure(dp, con);
if (!ret) {
- dp->state = DP_STATE_CONFIGURE;
+ dp->state = dp->plug_prime ? DP_STATE_CONFIGURE_PRIME :
+ DP_STATE_CONFIGURE;
if (dp->hpd != hpd) {
dp->hpd = hpd;
dp->pending_hpd = true;
@@ -209,6 +225,19 @@ static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf)
return ret;
}
+static int dp_altmode_configure_vdm_cable(struct dp_altmode *dp, u32 conf)
+{
+ int svdm_version = typec_altmode_get_cable_svdm_version(dp->plug_prime);
+ u32 header;
+
+ if (svdm_version < 0)
+ return svdm_version;
+
+ header = DP_HEADER(dp, svdm_version, DP_CMD_CONFIGURE);
+
+ return typec_cable_altmode_vdm(dp->plug_prime, TYPEC_PLUG_SOP_P, header, &conf, 2);
+}
+
static void dp_altmode_work(struct work_struct *work)
{
struct dp_altmode *dp = container_of(work, struct dp_altmode, work);
@@ -225,6 +254,19 @@ static void dp_altmode_work(struct work_struct *work)
if (ret && ret != -EBUSY)
dev_err(&dp->alt->dev, "failed to enter mode\n");
break;
+ case DP_STATE_ENTER_PRIME:
+ ret = typec_cable_altmode_enter(dp->alt, TYPEC_PLUG_SOP_P, NULL);
+ /*
+ * If we fail to enter Alt Mode on SOP', then we should drop the
+ * plug from the driver and attempt to run the driver without
+ * it.
+ */
+ if (ret && ret != -EBUSY) {
+ dev_err(&dp->alt->dev, "plug failed to enter mode\n");
+ dp->state = DP_STATE_ENTER;
+ goto disable_prime;
+ }
+ break;
case DP_STATE_UPDATE:
svdm_version = typec_altmode_get_svdm_version(dp->alt);
if (svdm_version < 0)
@@ -243,10 +285,24 @@ static void dp_altmode_work(struct work_struct *work)
dev_err(&dp->alt->dev,
"unable to send Configure command (%d)\n", ret);
break;
+ case DP_STATE_CONFIGURE_PRIME:
+ ret = dp_altmode_configure_vdm_cable(dp, dp->data_prime.conf);
+ if (ret) {
+ dev_err(&dp->plug_prime->dev,
+ "unable to send Configure command (%d)\n",
+ ret);
+ dp->state = DP_STATE_CONFIGURE;
+ goto disable_prime;
+ }
+ break;
case DP_STATE_EXIT:
if (typec_altmode_exit(dp->alt))
dev_err(&dp->alt->dev, "Exit Mode Failed!\n");
break;
+ case DP_STATE_EXIT_PRIME:
+ if (typec_cable_altmode_exit(dp->plug_prime, TYPEC_PLUG_SOP_P))
+ dev_err(&dp->plug_prime->dev, "Exit Mode Failed!\n");
+ break;
default:
break;
}
@@ -254,6 +310,13 @@ static void dp_altmode_work(struct work_struct *work)
dp->state = DP_STATE_IDLE;
mutex_unlock(&dp->lock);
+ return;
+
+disable_prime:
+ typec_altmode_put_plug(dp->plug_prime);
+ dp->plug_prime = NULL;
+ schedule_work(&dp->work);
+ mutex_unlock(&dp->lock);
}
static void dp_altmode_attention(struct typec_altmode *alt, const u32 vdo)
@@ -314,6 +377,8 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
dp->hpd = false;
sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
}
+ if (dp->plug_prime)
+ dp->state = DP_STATE_EXIT_PRIME;
break;
case DP_CMD_STATUS_UPDATE:
dp->data.status = *vdo;
@@ -348,10 +413,84 @@ err_unlock:
return ret;
}
+static int dp_cable_altmode_vdm(struct typec_altmode *alt, enum typec_plug_index sop,
+ const u32 hdr, const u32 *vdo, int count)
+{
+ struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
+ int cmd_type = PD_VDO_CMDT(hdr);
+ int cmd = PD_VDO_CMD(hdr);
+ int ret = 0;
+
+ mutex_lock(&dp->lock);
+
+ if (dp->state != DP_STATE_IDLE) {
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+
+ switch (cmd_type) {
+ case CMDT_RSP_ACK:
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ typec_altmode_update_active(dp->plug_prime, true);
+ dp->state = DP_STATE_ENTER;
+ break;
+ case CMD_EXIT_MODE:
+ dp->data_prime.status = 0;
+ dp->data_prime.conf = 0;
+ typec_altmode_update_active(dp->plug_prime, false);
+ break;
+ case DP_CMD_CONFIGURE:
+ dp->state = DP_STATE_CONFIGURE;
+ break;
+ default:
+ break;
+ }
+ break;
+ case CMDT_RSP_NAK:
+ switch (cmd) {
+ case DP_CMD_CONFIGURE:
+ dp->data_prime.conf = 0;
+ /* Attempt to configure on SOP, drop plug */
+ typec_altmode_put_plug(dp->plug_prime);
+ dp->plug_prime = NULL;
+ dp->state = DP_STATE_CONFIGURE;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (dp->state != DP_STATE_IDLE)
+ schedule_work(&dp->work);
+
+err_unlock:
+ mutex_unlock(&dp->lock);
+ return ret;
+}
+
static int dp_altmode_activate(struct typec_altmode *alt, int activate)
{
- return activate ? typec_altmode_enter(alt, NULL) :
- typec_altmode_exit(alt);
+ struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
+ int ret;
+
+ if (activate) {
+ if (dp->plug_prime) {
+ ret = typec_cable_altmode_enter(alt, TYPEC_PLUG_SOP_P, NULL);
+ if (ret < 0) {
+ typec_altmode_put_plug(dp->plug_prime);
+ dp->plug_prime = NULL;
+ } else {
+ return ret;
+ }
+ }
+ return typec_altmode_enter(alt, NULL);
+ } else {
+ return typec_altmode_exit(alt);
+ }
}
static const struct typec_altmode_ops dp_altmode_ops = {
@@ -360,6 +499,10 @@ static const struct typec_altmode_ops dp_altmode_ops = {
.activate = dp_altmode_activate,
};
+static const struct typec_cable_ops dp_cable_ops = {
+ .vdm = dp_cable_altmode_vdm,
+};
+
static const char * const configurations[] = {
[DP_CONF_USB] = "USB",
[DP_CONF_DFP_D] = "source",
@@ -501,6 +644,7 @@ pin_assignment_store(struct device *dev, struct device_attribute *attr,
/* Only send Configure command if a configuration has been set */
if (dp->alt->active && DP_CONF_CURRENTLY(dp->data.conf)) {
+ /* todo: send manual configure over SOP'*/
ret = dp_altmode_configure_vdm(dp, conf);
if (ret)
goto out_unlock;
@@ -579,6 +723,7 @@ static const struct attribute_group *displayport_groups[] = {
int dp_altmode_probe(struct typec_altmode *alt)
{
const struct typec_altmode *port = typec_altmode_get_partner(alt);
+ struct typec_altmode *plug = typec_altmode_get_plug(alt, TYPEC_PLUG_SOP_P);
struct fwnode_handle *fwnode;
struct dp_altmode *dp;
@@ -603,6 +748,13 @@ int dp_altmode_probe(struct typec_altmode *alt)
alt->desc = "DisplayPort";
alt->ops = &dp_altmode_ops;
+ if (plug) {
+ plug->desc = "Displayport";
+ plug->cable_ops = &dp_cable_ops;
+ }
+
+ dp->plug_prime = plug;
+
fwnode = dev_fwnode(alt->dev.parent->parent); /* typec_port fwnode */
if (fwnode_property_present(fwnode, "displayport"))
dp->connector_fwnode = fwnode_find_reference(fwnode, "displayport", 0);
@@ -612,8 +764,10 @@ int dp_altmode_probe(struct typec_altmode *alt)
dp->connector_fwnode = NULL;
typec_altmode_set_drvdata(alt, dp);
+ if (plug)
+ typec_altmode_set_drvdata(plug, dp);
- dp->state = DP_STATE_ENTER;
+ dp->state = plug ? DP_STATE_ENTER_PRIME : DP_STATE_ENTER;
schedule_work(&dp->work);
return 0;
@@ -625,6 +779,7 @@ void dp_altmode_remove(struct typec_altmode *alt)
struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
cancel_work_sync(&dp->work);
+ typec_altmode_put_plug(dp->plug_prime);
if (dp->connector_fwnode) {
drm_connector_oob_hotplug_event(dp->connector_fwnode,
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index e95ec7e382bb..6ea103e1abae 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -245,6 +245,108 @@ typec_altmode_get_partner(struct typec_altmode *adev)
EXPORT_SYMBOL_GPL(typec_altmode_get_partner);
/* -------------------------------------------------------------------------- */
+/* API for cable alternate modes */
+
+/**
+ * typec_cable_altmode_enter - Enter Mode
+ * @adev: The alternate mode
+ * @sop: Cable plug target for Enter Mode command
+ * @vdo: VDO for the Enter Mode command
+ *
+ * Alternate mode drivers use this function to enter mode on the cable plug.
+ * If the alternate mode does not require VDO, @vdo must be NULL.
+ */
+int typec_cable_altmode_enter(struct typec_altmode *adev, enum typec_plug_index sop, u32 *vdo)
+{
+ struct altmode *partner = to_altmode(adev)->partner;
+ struct typec_altmode *pdev;
+
+ if (!adev || adev->active)
+ return 0;
+
+ if (!partner)
+ return -ENODEV;
+
+ pdev = &partner->adev;
+
+ if (!pdev->active)
+ return -EPERM;
+
+ if (!pdev->cable_ops || !pdev->cable_ops->enter)
+ return -EOPNOTSUPP;
+
+ return pdev->cable_ops->enter(pdev, sop, vdo);
+}
+EXPORT_SYMBOL_GPL(typec_cable_altmode_enter);
+
+/**
+ * typec_cable_altmode_exit - Exit Mode
+ * @adev: The alternate mode
+ * @sop: Cable plug target for Exit Mode command
+ *
+ * The alternate mode drivers use this function to exit mode on the cable plug.
+ */
+int typec_cable_altmode_exit(struct typec_altmode *adev, enum typec_plug_index sop)
+{
+ struct altmode *partner = to_altmode(adev)->partner;
+ struct typec_altmode *pdev;
+
+ if (!adev || !adev->active)
+ return 0;
+
+ if (!partner)
+ return -ENODEV;
+
+ pdev = &partner->adev;
+
+ if (!pdev->cable_ops || !pdev->cable_ops->exit)
+ return -EOPNOTSUPP;
+
+ return pdev->cable_ops->exit(pdev, sop);
+}
+EXPORT_SYMBOL_GPL(typec_cable_altmode_exit);
+
+/**
+ * typec_cable_altmode_vdm - Send Vendor Defined Messages (VDM) between the cable plug and port.
+ * @adev: Alternate mode handle
+ * @sop: Cable plug target for VDM
+ * @header: VDM Header
+ * @vdo: Array of Vendor Defined Data Objects
+ * @count: Number of Data Objects
+ *
+ * The alternate mode drivers use this function for SVID specific communication
+ * with the cable plugs. The port drivers use it to deliver the Structured VDMs
+ * received from the cable plugs to the alternate mode drivers.
+ */
+int typec_cable_altmode_vdm(struct typec_altmode *adev, enum typec_plug_index sop,
+ const u32 header, const u32 *vdo, int count)
+{
+ struct altmode *altmode;
+ struct typec_altmode *pdev;
+
+ if (!adev)
+ return 0;
+
+ altmode = to_altmode(adev);
+
+ if (is_typec_plug(adev->dev.parent)) {
+ if (!altmode->partner)
+ return -ENODEV;
+ pdev = &altmode->partner->adev;
+ } else {
+ if (!altmode->plug[sop])
+ return -ENODEV;
+ pdev = &altmode->plug[sop]->adev;
+ }
+
+ if (!pdev->cable_ops || !pdev->cable_ops->vdm)
+ return -EOPNOTSUPP;
+
+ return pdev->cable_ops->vdm(pdev, sop, header, vdo, count);
+}
+EXPORT_SYMBOL_GPL(typec_cable_altmode_vdm);
+
+/* -------------------------------------------------------------------------- */
/* API for the alternate mode drivers */
/**
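Together these three exports mirror the existing partner alternate-mode API for the SOP'/SOP'' cable plugs; the DisplayPort changes earlier in this series are the first user. A condensed sketch of the call sequence from an alternate mode driver, assuming a plug handle obtained with typec_altmode_get_plug() and with error handling trimmed:

	/* Illustrative sketch only -- not part of the patch above. */
	struct typec_altmode *plug = typec_altmode_get_plug(alt, TYPEC_PLUG_SOP_P);
	int ret;

	if (plug) {
		/* Enter the mode on the cable plug before configuring the partner. */
		ret = typec_cable_altmode_enter(alt, TYPEC_PLUG_SOP_P, NULL);
		if (ret && ret != -EBUSY)
			dev_err(&alt->dev, "SOP' Enter Mode failed: %d\n", ret);

		/* SVID specific commands (e.g. DP Configure) then go out with:
		 * typec_cable_altmode_vdm(plug, TYPEC_PLUG_SOP_P, header, vdo, count);
		 * and the mode is left again with:
		 * typec_cable_altmode_exit(plug, TYPEC_PLUG_SOP_P);
		 */
		typec_altmode_put_plug(plug);
	}
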
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 015aa9253353..389c7f0b8d93 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -21,7 +21,7 @@
static DEFINE_IDA(typec_index_ida);
-struct class typec_class = {
+const struct class typec_class = {
.name = "typec",
};
@@ -2132,6 +2132,46 @@ int typec_get_negotiated_svdm_version(struct typec_port *port)
EXPORT_SYMBOL_GPL(typec_get_negotiated_svdm_version);
/**
+ * typec_get_cable_svdm_version - Get cable negotiated SVDM Version
+ * @port: USB Type-C Port.
+ *
+ * Get the negotiated SVDM Version for the cable. The Version is set to the port
+ * default value based on the PD Revision during cable registration, and updated
+ * after a successful Discover Identity if the negotiated value is less than the
+ * default.
+ *
+ * Returns usb_pd_svdm_ver if the cable has been registered otherwise -ENODEV.
+ */
+int typec_get_cable_svdm_version(struct typec_port *port)
+{
+ enum usb_pd_svdm_ver svdm_version;
+ struct device *cable_dev;
+
+ cable_dev = device_find_child(&port->dev, NULL, cable_match);
+ if (!cable_dev)
+ return -ENODEV;
+
+ svdm_version = to_typec_cable(cable_dev)->svdm_version;
+ put_device(cable_dev);
+
+ return svdm_version;
+}
+EXPORT_SYMBOL_GPL(typec_get_cable_svdm_version);
+
+/**
+ * typec_cable_set_svdm_version - Set negotiated Structured VDM (SVDM) Version
+ * @cable: USB Type-C Active Cable that supports SVDM
+ * @svdm_version: Negotiated SVDM Version
+ *
+ * This routine is used to save the negotiated SVDM Version.
+ */
+void typec_cable_set_svdm_version(struct typec_cable *cable, enum usb_pd_svdm_ver svdm_version)
+{
+ cable->svdm_version = svdm_version;
+}
+EXPORT_SYMBOL_GPL(typec_cable_set_svdm_version);
+
+/**
* typec_get_drvdata - Return private driver data pointer
* @port: USB Type-C port
*/
@@ -2281,6 +2321,25 @@ void typec_port_register_altmodes(struct typec_port *port,
EXPORT_SYMBOL_GPL(typec_port_register_altmodes);
/**
+ * typec_port_register_cable_ops - Register typec_cable_ops to port altmodes
+ * @altmodes: USB Type-C Port's altmode vector
+ * @max_altmodes: The maximum number of alt modes supported by the port
+ * @ops: Cable alternate mode vector
+ */
+void typec_port_register_cable_ops(struct typec_altmode **altmodes, int max_altmodes,
+ const struct typec_cable_ops *ops)
+{
+ int i;
+
+ for (i = 0; i < max_altmodes; i++) {
+ if (!altmodes[i])
+ return;
+ altmodes[i]->cable_ops = ops;
+ }
+}
+EXPORT_SYMBOL_GPL(typec_port_register_cable_ops);
+
+/**
* typec_register_port - Register a USB Type-C Port
* @parent: Parent device
* @cap: Description of the port
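The class helpers above are aimed at port drivers rather than alternate mode drivers: one attaches a typec_cable_ops vector to every altmode the port has registered, the other records the SVDM version negotiated with the cable after Discover Identity. A rough usage sketch; the port_altmode array, cable pointer and callback names are assumptions modelled on a tcpm-style port driver:

	/* Illustrative sketch only -- not part of the patch above. */
	static const struct typec_cable_ops my_cable_ops = {
		.enter	= my_cable_enter,	/* assumed driver callbacks */
		.exit	= my_cable_exit,
		.vdm	= my_cable_vdm,
	};

	/* After the port alternate modes have been registered: */
	typec_port_register_cable_ops(port->port_altmode,
				      ARRAY_SIZE(port->port_altmode), &my_cable_ops);

	/* Once Discover Identity with the cable plug has completed: */
	typec_cable_set_svdm_version(port->cable, negotiated_svdm_version);
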
diff --git a/drivers/usb/typec/class.h b/drivers/usb/typec/class.h
index c36761ba3f59..7485cdb9dd20 100644
--- a/drivers/usb/typec/class.h
+++ b/drivers/usb/typec/class.h
@@ -23,6 +23,7 @@ struct typec_cable {
struct usb_pd_identity *identity;
unsigned int active:1;
u16 pd_revision; /* 0300H = "3.0" */
+ enum usb_pd_svdm_ver svdm_version;
};
struct typec_partner {
@@ -92,9 +93,9 @@ extern const struct device_type typec_port_dev_type;
#define is_typec_plug(dev) ((dev)->type == &typec_plug_dev_type)
#define is_typec_port(dev) ((dev)->type == &typec_port_dev_type)
-extern struct class typec_mux_class;
-extern struct class retimer_class;
-extern struct class typec_class;
+extern const struct class typec_mux_class;
+extern const struct class retimer_class;
+extern const struct class typec_class;
#if defined(CONFIG_ACPI)
int typec_link_ports(struct typec_port *connector);
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 80dd91938d96..49926d6e72c7 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -469,6 +469,6 @@ void *typec_mux_get_drvdata(struct typec_mux_dev *mux_dev)
}
EXPORT_SYMBOL_GPL(typec_mux_get_drvdata);
-struct class typec_mux_class = {
+const struct class typec_mux_class = {
.name = "typec_mux",
};
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index d2cb5e733e57..399c7b0983df 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -36,6 +36,16 @@ config TYPEC_MUX_INTEL_PMC
control the USB role switch and also the multiplexer/demultiplexer
switches used with USB Type-C Alternate Modes.
+config TYPEC_MUX_IT5205
+ tristate "ITE IT5205 Type-C USB Alt Mode Passive MUX driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Driver for the ITE IT5205 Type-C USB Alternate Mode Passive MUX
+ which provides support for muxing DisplayPort and sideband signals
+ on a common USB Type-C connector.
+ If compiled as a module, the module will be named it5205.
+
config TYPEC_MUX_NB7VPQ904M
tristate "On Semiconductor NB7VPQ904M Type-C redriver driver"
depends on I2C
diff --git a/drivers/usb/typec/mux/Makefile b/drivers/usb/typec/mux/Makefile
index 57dc9ac6f8dc..bb96f30267af 100644
--- a/drivers/usb/typec/mux/Makefile
+++ b/drivers/usb/typec/mux/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_TYPEC_MUX_FSA4480) += fsa4480.o
obj-$(CONFIG_TYPEC_MUX_GPIO_SBU) += gpio-sbu-mux.o
obj-$(CONFIG_TYPEC_MUX_PI3USB30532) += pi3usb30532.o
obj-$(CONFIG_TYPEC_MUX_INTEL_PMC) += intel_pmc_mux.o
+obj-$(CONFIG_TYPEC_MUX_IT5205) += it5205.o
obj-$(CONFIG_TYPEC_MUX_NB7VPQ904M) += nb7vpq904m.o
obj-$(CONFIG_TYPEC_MUX_PTN36502) += ptn36502.o
obj-$(CONFIG_TYPEC_MUX_WCD939X_USBSS) += wcd939x-usbss.o
diff --git a/drivers/usb/typec/mux/it5205.c b/drivers/usb/typec/mux/it5205.c
new file mode 100644
index 000000000000..5535932e42cd
--- /dev/null
+++ b/drivers/usb/typec/mux/it5205.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ITE IT5205 Type-C USB alternate mode passive mux
+ *
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/tcpm.h>
+#include <linux/usb/typec.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+
+#define IT5205_REG_CHIP_ID(x) (0x4 + (x))
+#define IT5205FN_CHIP_ID 0x35323035 /* "5205" */
+
+/* MUX power down register */
+#define IT5205_REG_MUXPDR 0x10
+#define IT5205_MUX_POWER_DOWN BIT(0)
+
+/* MUX control register */
+#define IT5205_REG_MUXCR 0x11
+#define IT5205_POLARITY_INVERTED BIT(4)
+#define IT5205_DP_USB_CTRL_MASK GENMASK(3, 0)
+#define IT5205_DP 0x0f
+#define IT5205_DP_USB 0x03
+#define IT5205_USB 0x07
+
+/* Vref Select Register */
+#define IT5205_REG_VSR 0x10
+#define IT5205_VREF_SELECT_MASK GENMASK(5, 4)
+#define IT5205_VREF_SELECT_3_3V 0x00
+#define IT5205_VREF_SELECT_OFF 0x20
+
+/* CSBU Over Voltage Protection Register */
+#define IT5205_REG_CSBUOVPSR 0x1e
+#define IT5205_OVP_SELECT_MASK GENMASK(5, 4)
+#define IT5205_OVP_3_90V 0x00
+#define IT5205_OVP_3_68V 0x10
+#define IT5205_OVP_3_62V 0x20
+#define IT5205_OVP_3_57V 0x30
+
+/* CSBU Switch Register */
+#define IT5205_REG_CSBUSR 0x22
+#define IT5205_CSBUSR_SWITCH BIT(0)
+
+/* Interrupt Switch Register */
+#define IT5205_REG_ISR 0x25
+#define IT5205_ISR_CSBU_MASK BIT(4)
+#define IT5205_ISR_CSBU_OVP BIT(0)
+
+struct it5205 {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct typec_switch_dev *sw;
+ struct typec_mux_dev *mux;
+};
+
+static int it5205_switch_set(struct typec_switch_dev *sw, enum typec_orientation orientation)
+{
+ struct it5205 *it = typec_switch_get_drvdata(sw);
+
+ switch (orientation) {
+ case TYPEC_ORIENTATION_NORMAL:
+ regmap_update_bits(it->regmap, IT5205_REG_MUXCR,
+ IT5205_POLARITY_INVERTED, 0);
+ break;
+ case TYPEC_ORIENTATION_REVERSE:
+ regmap_update_bits(it->regmap, IT5205_REG_MUXCR,
+ IT5205_POLARITY_INVERTED, IT5205_POLARITY_INVERTED);
+ break;
+ case TYPEC_ORIENTATION_NONE:
+ fallthrough;
+ default:
+ regmap_write(it->regmap, IT5205_REG_MUXCR, 0);
+ break;
+ }
+
+ return 0;
+}
+
+static int it5205_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
+{
+ struct it5205 *it = typec_mux_get_drvdata(mux);
+ u8 val;
+
+ if (state->mode >= TYPEC_STATE_MODAL &&
+ state->alt->svid != USB_TYPEC_DP_SID)
+ return -EINVAL;
+
+ switch (state->mode) {
+ case TYPEC_STATE_USB:
+ val = IT5205_USB;
+ break;
+ case TYPEC_DP_STATE_C:
+ fallthrough;
+ case TYPEC_DP_STATE_E:
+ val = IT5205_DP;
+ break;
+ case TYPEC_DP_STATE_D:
+ val = IT5205_DP_USB;
+ break;
+ case TYPEC_STATE_SAFE:
+ fallthrough;
+ default:
+ val = 0;
+ break;
+ }
+
+ return regmap_update_bits(it->regmap, IT5205_REG_MUXCR,
+ IT5205_DP_USB_CTRL_MASK, val);
+}
+
+static irqreturn_t it5205_irq_handler(int irq, void *data)
+{
+ struct it5205 *it = data;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(it->regmap, IT5205_REG_ISR, &val);
+ if (ret)
+ return IRQ_NONE;
+
+ if (val & IT5205_ISR_CSBU_OVP) {
+ dev_warn(&it->client->dev, "Overvoltage detected!\n");
+
+ /* Reset CSBU */
+ regmap_update_bits(it->regmap, IT5205_REG_CSBUSR,
+ IT5205_CSBUSR_SWITCH, 0);
+ regmap_update_bits(it->regmap, IT5205_REG_CSBUSR,
+ IT5205_CSBUSR_SWITCH, IT5205_CSBUSR_SWITCH);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void it5205_enable_ovp(struct it5205 *it)
+{
+ /* Select Vref 3.3v */
+ regmap_update_bits(it->regmap, IT5205_REG_VSR,
+ IT5205_VREF_SELECT_MASK, IT5205_VREF_SELECT_3_3V);
+
+ /* Trigger OVP at 3.68V */
+ regmap_update_bits(it->regmap, IT5205_REG_CSBUOVPSR,
+ IT5205_OVP_SELECT_MASK, IT5205_OVP_3_68V);
+
+ /* Unmask OVP interrupt */
+ regmap_update_bits(it->regmap, IT5205_REG_ISR,
+ IT5205_ISR_CSBU_MASK, 0);
+
+ /* Enable CSBU Interrupt */
+ regmap_update_bits(it->regmap, IT5205_REG_CSBUSR,
+ IT5205_CSBUSR_SWITCH, IT5205_CSBUSR_SWITCH);
+}
+
+static const struct regmap_config it5205_regmap = {
+ .max_register = 0x2f,
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int it5205_probe(struct i2c_client *client)
+{
+ struct typec_switch_desc sw_desc = { };
+ struct typec_mux_desc mux_desc = { };
+ struct device *dev = &client->dev;
+ struct it5205 *it;
+ u32 val, chipid = 0;
+ int i, ret;
+
+ it = devm_kzalloc(dev, sizeof(*it), GFP_KERNEL);
+ if (!it)
+ return -ENOMEM;
+
+ ret = devm_regulator_get_enable(dev, "vcc");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulator\n");
+
+ it->client = client;
+
+ it->regmap = devm_regmap_init_i2c(client, &it5205_regmap);
+ if (IS_ERR(it->regmap))
+ return dev_err_probe(dev, PTR_ERR(it->regmap),
+ "Failed to init regmap\n");
+
+ /* IT5205 needs a long time to power up after enabling regulator */
+ msleep(50);
+
+ /* Unset poweroff bit */
+ ret = regmap_write(it->regmap, IT5205_REG_MUXPDR, 0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set power on\n");
+
+ /* Read the 32 bits ChipID */
+ for (i = 3; i >= 0; i--) {
+ ret = regmap_read(it->regmap, IT5205_REG_CHIP_ID(i), &val);
+ if (ret)
+ return ret;
+
+ chipid |= val << (i * 8);
+ }
+
+ if (chipid != IT5205FN_CHIP_ID)
+ return dev_err_probe(dev, -EINVAL,
+ "Unknown ChipID 0x%x\n", chipid);
+
+ /* Initialize as USB mode with default (non-inverted) polarity */
+ ret = regmap_write(it->regmap, IT5205_REG_MUXCR, IT5205_USB);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot set mode to USB\n");
+
+ sw_desc.drvdata = it;
+ sw_desc.fwnode = dev_fwnode(dev);
+ sw_desc.set = it5205_switch_set;
+
+ it->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(it->sw))
+ return dev_err_probe(dev, PTR_ERR(it->sw),
+ "failed to register typec switch\n");
+
+ mux_desc.drvdata = it;
+ mux_desc.fwnode = dev_fwnode(dev);
+ mux_desc.set = it5205_mux_set;
+
+ it->mux = typec_mux_register(dev, &mux_desc);
+ if (IS_ERR(it->mux)) {
+ typec_switch_unregister(it->sw);
+ return dev_err_probe(dev, PTR_ERR(it->mux),
+ "failed to register typec mux\n");
+ }
+
+ i2c_set_clientdata(client, it);
+
+ if (of_property_read_bool(dev->of_node, "ite,ovp-enable") && client->irq) {
+ it5205_enable_ovp(it);
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ it5205_irq_handler,
+ IRQF_ONESHOT, dev_name(dev), it);
+ if (ret) {
+ typec_mux_unregister(it->mux);
+ typec_switch_unregister(it->sw);
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
+ }
+ }
+
+ return 0;
+}
+
+static void it5205_remove(struct i2c_client *client)
+{
+ struct it5205 *it = i2c_get_clientdata(client);
+
+ typec_mux_unregister(it->mux);
+ typec_switch_unregister(it->sw);
+}
+
+static const struct i2c_device_id it5205_table[] = {
+ { "it5205" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, it5205_table);
+
+static const struct of_device_id it5205_of_table[] = {
+ { .compatible = "ite,it5205" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, it5205_of_table);
+
+static struct i2c_driver it5205_driver = {
+ .driver = {
+ .name = "it5205",
+ .of_match_table = it5205_of_table,
+ },
+ .probe = it5205_probe,
+ .remove = it5205_remove,
+ .id_table = it5205_table,
+};
+module_i2c_driver(it5205_driver);
+
+MODULE_AUTHOR("Tianping Fang <tianping.fang@mediatek.com>");
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("ITE IT5205 alternate mode passive MUX driver");
+MODULE_LICENSE("GPL");
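The chip detection above assembles the 32-bit identifier from four single-byte IT5205_REG_CHIP_ID(i) registers, most significant byte first, before comparing it with IT5205FN_CHIP_ID. A standalone sketch of that byte assembly, using hypothetical register values in place of the regmap reads:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Hypothetical IT5205_REG_CHIP_ID(0..3) bytes; on hardware each
             * value would come from a regmap_read() as in it5205_probe().
             */
            uint8_t reg[4] = { 0x05, 0x52, 0x32, 0x35 };
            uint32_t chipid = 0;
            int i;

            for (i = 3; i >= 0; i--)
                    chipid |= (uint32_t)reg[i] << (i * 8);

            printf("chipid = 0x%08x\n", chipid);    /* 0x35325205 here */
            return 0;
    }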
diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c
index b9cca2be76fc..d78c04a421bc 100644
--- a/drivers/usb/typec/pd.c
+++ b/drivers/usb/typec/pd.c
@@ -157,7 +157,7 @@ static const struct attribute_group source_fixed_supply_group = {
};
__ATTRIBUTE_GROUPS(source_fixed_supply);
-static struct device_type source_fixed_supply_type = {
+static const struct device_type source_fixed_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_fixed_supply_groups,
@@ -182,7 +182,7 @@ static const struct attribute_group sink_fixed_supply_group = {
};
__ATTRIBUTE_GROUPS(sink_fixed_supply);
-static struct device_type sink_fixed_supply_type = {
+static const struct device_type sink_fixed_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_fixed_supply_groups,
@@ -213,7 +213,7 @@ static struct attribute *source_variable_supply_attrs[] = {
};
ATTRIBUTE_GROUPS(source_variable_supply);
-static struct device_type source_variable_supply_type = {
+static const struct device_type source_variable_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_variable_supply_groups,
@@ -227,7 +227,7 @@ static struct attribute *sink_variable_supply_attrs[] = {
};
ATTRIBUTE_GROUPS(sink_variable_supply);
-static struct device_type sink_variable_supply_type = {
+static const struct device_type sink_variable_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_variable_supply_groups,
@@ -258,7 +258,7 @@ static struct attribute *source_battery_attrs[] = {
};
ATTRIBUTE_GROUPS(source_battery);
-static struct device_type source_battery_type = {
+static const struct device_type source_battery_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_battery_groups,
@@ -272,7 +272,7 @@ static struct attribute *sink_battery_attrs[] = {
};
ATTRIBUTE_GROUPS(sink_battery);
-static struct device_type sink_battery_type = {
+static const struct device_type sink_battery_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_battery_groups,
@@ -339,7 +339,7 @@ static struct attribute *source_pps_attrs[] = {
};
ATTRIBUTE_GROUPS(source_pps);
-static struct device_type source_pps_type = {
+static const struct device_type source_pps_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_pps_groups,
@@ -353,7 +353,7 @@ static struct attribute *sink_pps_attrs[] = {
};
ATTRIBUTE_GROUPS(sink_pps);
-static struct device_type sink_pps_type = {
+static const struct device_type sink_pps_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_pps_groups,
@@ -371,30 +371,30 @@ static const char * const apdo_supply_name[] = {
[APDO_TYPE_PPS] = "programmable_supply",
};
-static struct device_type *source_type[] = {
+static const struct device_type *source_type[] = {
[PDO_TYPE_FIXED] = &source_fixed_supply_type,
[PDO_TYPE_BATT] = &source_battery_type,
[PDO_TYPE_VAR] = &source_variable_supply_type,
};
-static struct device_type *source_apdo_type[] = {
+static const struct device_type *source_apdo_type[] = {
[APDO_TYPE_PPS] = &source_pps_type,
};
-static struct device_type *sink_type[] = {
+static const struct device_type *sink_type[] = {
[PDO_TYPE_FIXED] = &sink_fixed_supply_type,
[PDO_TYPE_BATT] = &sink_battery_type,
[PDO_TYPE_VAR] = &sink_variable_supply_type,
};
-static struct device_type *sink_apdo_type[] = {
+static const struct device_type *sink_apdo_type[] = {
[APDO_TYPE_PPS] = &sink_pps_type,
};
/* REVISIT: Export when EPR_*_Capabilities need to be supported. */
static int add_pdo(struct usb_power_delivery_capabilities *cap, u32 pdo, int position)
{
- struct device_type *type;
+ const struct device_type *type;
const char *name;
struct pdo *p;
int ret;
@@ -460,7 +460,7 @@ static void pd_capabilities_release(struct device *dev)
kfree(to_usb_power_delivery_capabilities(dev));
}
-static struct device_type pd_capabilities_type = {
+static const struct device_type pd_capabilities_type = {
.name = "capabilities",
.release = pd_capabilities_release,
};
@@ -575,7 +575,7 @@ static void pd_release(struct device *dev)
kfree(pd);
}
-static struct device_type pd_type = {
+static const struct device_type pd_type = {
.name = "usb_power_delivery",
.release = pd_release,
.groups = pd_groups,
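The source_type[]/sink_type[] tables above are indexed by the type field carried in bits 31:30 of each Power Data Object, which is why constifying both the tables and the device_type objects they point to is enough for add_pdo(). A standalone decode of a hypothetical 5 V / 3 A fixed-supply PDO, following the standard fixed-supply layout (voltage in 50 mV units at bits 19:10, maximum current in 10 mA units at bits 9:0):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t pdo = 0x0001912c;                    /* hypothetical PDO */
            unsigned int type = (pdo >> 30) & 0x3;        /* 0 == fixed supply */
            unsigned int mv = ((pdo >> 10) & 0x3ff) * 50;
            unsigned int ma = (pdo & 0x3ff) * 10;

            printf("type=%u voltage=%u mV current=%u mA\n", type, mv, ma);
            return 0;
    }

In the kernel the pdo_type()/pdo_fixed_voltage()/pdo_max_current() helpers from <linux/usb/pd.h> perform the same decode.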
diff --git a/drivers/usb/typec/retimer.c b/drivers/usb/typec/retimer.c
index 4a7d1b5c4d86..b519fcf358ca 100644
--- a/drivers/usb/typec/retimer.c
+++ b/drivers/usb/typec/retimer.c
@@ -155,6 +155,6 @@ void *typec_retimer_get_drvdata(struct typec_retimer *retimer)
}
EXPORT_SYMBOL_GPL(typec_retimer_get_drvdata);
-struct class retimer_class = {
+const struct class retimer_class = {
.name = "retimer",
};
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index bc21006e979c..ef18a448b740 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1467,7 +1467,7 @@ static int fusb302_pd_read_message(struct fusb302_chip *chip,
if ((!len) && (pd_header_type_le(msg->header) == PD_CTRL_GOOD_CRC))
tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS);
else
- tcpm_pd_receive(chip->tcpm_port, msg);
+ tcpm_pd_receive(chip->tcpm_port, msg, TCPC_TX_SOP);
return ret;
}
diff --git a/drivers/usb/typec/tcpm/qcom/Makefile b/drivers/usb/typec/tcpm/qcom/Makefile
index dc1e8832e197..cc23042b9487 100644
--- a/drivers/usb/typec/tcpm/qcom/Makefile
+++ b/drivers/usb/typec/tcpm/qcom/Makefile
@@ -3,4 +3,5 @@
obj-$(CONFIG_TYPEC_QCOM_PMIC) += qcom_pmic_tcpm.o
qcom_pmic_tcpm-y += qcom_pmic_typec.o \
qcom_pmic_typec_port.o \
- qcom_pmic_typec_pdphy.o
+ qcom_pmic_typec_pdphy.o \
+ qcom_pmic_typec_pdphy_stub.o \
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
index 1a2b4bddaa97..e48412cdcb0f 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
@@ -20,130 +20,15 @@
#include <drm/bridge/aux-bridge.h>
+#include "qcom_pmic_typec.h"
#include "qcom_pmic_typec_pdphy.h"
#include "qcom_pmic_typec_port.h"
struct pmic_typec_resources {
- struct pmic_typec_pdphy_resources *pdphy_res;
- struct pmic_typec_port_resources *port_res;
+ const struct pmic_typec_pdphy_resources *pdphy_res;
+ const struct pmic_typec_port_resources *port_res;
};
-struct pmic_typec {
- struct device *dev;
- struct tcpm_port *tcpm_port;
- struct tcpc_dev tcpc;
- struct pmic_typec_pdphy *pmic_typec_pdphy;
- struct pmic_typec_port *pmic_typec_port;
- bool vbus_enabled;
- struct mutex lock; /* VBUS state serialization */
-};
-
-#define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)
-
-static int qcom_pmic_typec_get_vbus(struct tcpc_dev *tcpc)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
- int ret;
-
- mutex_lock(&tcpm->lock);
- ret = tcpm->vbus_enabled || qcom_pmic_typec_port_get_vbus(tcpm->pmic_typec_port);
- mutex_unlock(&tcpm->lock);
-
- return ret;
-}
-
-static int qcom_pmic_typec_set_vbus(struct tcpc_dev *tcpc, bool on, bool sink)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
- int ret = 0;
-
- mutex_lock(&tcpm->lock);
- if (tcpm->vbus_enabled == on)
- goto done;
-
- ret = qcom_pmic_typec_port_set_vbus(tcpm->pmic_typec_port, on);
- if (ret)
- goto done;
-
- tcpm->vbus_enabled = on;
- tcpm_vbus_change(tcpm->tcpm_port);
-
-done:
- dev_dbg(tcpm->dev, "set_vbus set: %d result %d\n", on, ret);
- mutex_unlock(&tcpm->lock);
-
- return ret;
-}
-
-static int qcom_pmic_typec_set_vconn(struct tcpc_dev *tcpc, bool on)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_port_set_vconn(tcpm->pmic_typec_port, on);
-}
-
-static int qcom_pmic_typec_get_cc(struct tcpc_dev *tcpc,
- enum typec_cc_status *cc1,
- enum typec_cc_status *cc2)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_port_get_cc(tcpm->pmic_typec_port, cc1, cc2);
-}
-
-static int qcom_pmic_typec_set_cc(struct tcpc_dev *tcpc,
- enum typec_cc_status cc)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_port_set_cc(tcpm->pmic_typec_port, cc);
-}
-
-static int qcom_pmic_typec_set_polarity(struct tcpc_dev *tcpc,
- enum typec_cc_polarity pol)
-{
- /* Polarity is set separately by phy-qcom-qmp.c */
- return 0;
-}
-
-static int qcom_pmic_typec_start_toggling(struct tcpc_dev *tcpc,
- enum typec_port_type port_type,
- enum typec_cc_status cc)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_port_start_toggling(tcpm->pmic_typec_port,
- port_type, cc);
-}
-
-static int qcom_pmic_typec_set_roles(struct tcpc_dev *tcpc, bool attached,
- enum typec_role power_role,
- enum typec_data_role data_role)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_pdphy_set_roles(tcpm->pmic_typec_pdphy,
- data_role, power_role);
-}
-
-static int qcom_pmic_typec_set_pd_rx(struct tcpc_dev *tcpc, bool on)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_pdphy_set_pd_rx(tcpm->pmic_typec_pdphy, on);
-}
-
-static int qcom_pmic_typec_pd_transmit(struct tcpc_dev *tcpc,
- enum tcpm_transmit_type type,
- const struct pd_message *msg,
- unsigned int negotiated_rev)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_pdphy_pd_transmit(tcpm->pmic_typec_pdphy, type,
- msg, negotiated_rev);
-}
-
static int qcom_pmic_typec_init(struct tcpc_dev *tcpc)
{
return 0;
@@ -157,7 +42,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
const struct pmic_typec_resources *res;
struct regmap *regmap;
struct device *bridge_dev;
- u32 base[2];
+ u32 base;
int ret;
res = of_device_get_match_data(dev);
@@ -170,16 +55,6 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
tcpm->dev = dev;
tcpm->tcpc.init = qcom_pmic_typec_init;
- tcpm->tcpc.get_vbus = qcom_pmic_typec_get_vbus;
- tcpm->tcpc.set_vbus = qcom_pmic_typec_set_vbus;
- tcpm->tcpc.set_cc = qcom_pmic_typec_set_cc;
- tcpm->tcpc.get_cc = qcom_pmic_typec_get_cc;
- tcpm->tcpc.set_polarity = qcom_pmic_typec_set_polarity;
- tcpm->tcpc.set_vconn = qcom_pmic_typec_set_vconn;
- tcpm->tcpc.start_toggling = qcom_pmic_typec_start_toggling;
- tcpm->tcpc.set_pd_rx = qcom_pmic_typec_set_pd_rx;
- tcpm->tcpc.set_roles = qcom_pmic_typec_set_roles;
- tcpm->tcpc.pd_transmit = qcom_pmic_typec_pd_transmit;
regmap = dev_get_regmap(dev->parent, NULL);
if (!regmap) {
@@ -187,29 +62,30 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = of_property_read_u32_array(np, "reg", base, 2);
+ ret = of_property_read_u32_index(np, "reg", 0, &base);
if (ret)
return ret;
- tcpm->pmic_typec_port = qcom_pmic_typec_port_alloc(dev);
- if (IS_ERR(tcpm->pmic_typec_port))
- return PTR_ERR(tcpm->pmic_typec_port);
-
- tcpm->pmic_typec_pdphy = qcom_pmic_typec_pdphy_alloc(dev);
- if (IS_ERR(tcpm->pmic_typec_pdphy))
- return PTR_ERR(tcpm->pmic_typec_pdphy);
-
- ret = qcom_pmic_typec_port_probe(pdev, tcpm->pmic_typec_port,
- res->port_res, regmap, base[0]);
+ ret = qcom_pmic_typec_port_probe(pdev, tcpm,
+ res->port_res, regmap, base);
if (ret)
return ret;
- ret = qcom_pmic_typec_pdphy_probe(pdev, tcpm->pmic_typec_pdphy,
- res->pdphy_res, regmap, base[1]);
- if (ret)
- return ret;
+ if (res->pdphy_res) {
+ ret = of_property_read_u32_index(np, "reg", 1, &base);
+ if (ret)
+ return ret;
+
+ ret = qcom_pmic_typec_pdphy_probe(pdev, tcpm,
+ res->pdphy_res, regmap, base);
+ if (ret)
+ return ret;
+ } else {
+ ret = qcom_pmic_typec_pdphy_stub_probe(pdev, tcpm);
+ if (ret)
+ return ret;
+ }
- mutex_init(&tcpm->lock);
platform_set_drvdata(pdev, tcpm);
tcpm->tcpc.fwnode = device_get_named_child_node(tcpm->dev, "connector");
@@ -226,13 +102,11 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
goto fwnode_remove;
}
- ret = qcom_pmic_typec_port_start(tcpm->pmic_typec_port,
- tcpm->tcpm_port);
+ ret = tcpm->port_start(tcpm, tcpm->tcpm_port);
if (ret)
goto fwnode_remove;
- ret = qcom_pmic_typec_pdphy_start(tcpm->pmic_typec_pdphy,
- tcpm->tcpm_port);
+ ret = tcpm->pdphy_start(tcpm, tcpm->tcpm_port);
if (ret)
goto fwnode_remove;
@@ -248,91 +122,25 @@ static void qcom_pmic_typec_remove(struct platform_device *pdev)
{
struct pmic_typec *tcpm = platform_get_drvdata(pdev);
- qcom_pmic_typec_pdphy_stop(tcpm->pmic_typec_pdphy);
- qcom_pmic_typec_port_stop(tcpm->pmic_typec_port);
+ tcpm->pdphy_stop(tcpm);
+ tcpm->port_stop(tcpm);
tcpm_unregister_port(tcpm->tcpm_port);
fwnode_remove_software_node(tcpm->tcpc.fwnode);
}
-static struct pmic_typec_pdphy_resources pm8150b_pdphy_res = {
- .irq_params = {
- {
- .virq = PMIC_PDPHY_SIG_TX_IRQ,
- .irq_name = "sig-tx",
- },
- {
- .virq = PMIC_PDPHY_SIG_RX_IRQ,
- .irq_name = "sig-rx",
- },
- {
- .virq = PMIC_PDPHY_MSG_TX_IRQ,
- .irq_name = "msg-tx",
- },
- {
- .virq = PMIC_PDPHY_MSG_RX_IRQ,
- .irq_name = "msg-rx",
- },
- {
- .virq = PMIC_PDPHY_MSG_TX_FAIL_IRQ,
- .irq_name = "msg-tx-failed",
- },
- {
- .virq = PMIC_PDPHY_MSG_TX_DISCARD_IRQ,
- .irq_name = "msg-tx-discarded",
- },
- {
- .virq = PMIC_PDPHY_MSG_RX_DISCARD_IRQ,
- .irq_name = "msg-rx-discarded",
- },
- },
- .nr_irqs = 7,
-};
-
-static struct pmic_typec_port_resources pm8150b_port_res = {
- .irq_params = {
- {
- .irq_name = "vpd-detect",
- .virq = PMIC_TYPEC_VPD_IRQ,
- },
-
- {
- .irq_name = "cc-state-change",
- .virq = PMIC_TYPEC_CC_STATE_IRQ,
- },
- {
- .irq_name = "vconn-oc",
- .virq = PMIC_TYPEC_VCONN_OC_IRQ,
- },
-
- {
- .irq_name = "vbus-change",
- .virq = PMIC_TYPEC_VBUS_IRQ,
- },
-
- {
- .irq_name = "attach-detach",
- .virq = PMIC_TYPEC_ATTACH_DETACH_IRQ,
- },
- {
- .irq_name = "legacy-cable-detect",
- .virq = PMIC_TYPEC_LEGACY_CABLE_IRQ,
- },
-
- {
- .irq_name = "try-snk-src-detect",
- .virq = PMIC_TYPEC_TRY_SNK_SRC_IRQ,
- },
- },
- .nr_irqs = 7,
+static const struct pmic_typec_resources pm8150b_typec_res = {
+ .pdphy_res = &pm8150b_pdphy_res,
+ .port_res = &pm8150b_port_res,
};
-static struct pmic_typec_resources pm8150b_typec_res = {
- .pdphy_res = &pm8150b_pdphy_res,
+static const struct pmic_typec_resources pmi632_typec_res = {
+ /* PD PHY not present */
.port_res = &pm8150b_port_res,
};
static const struct of_device_id qcom_pmic_typec_table[] = {
{ .compatible = "qcom,pm8150b-typec", .data = &pm8150b_typec_res },
+ { .compatible = "qcom,pmi632-typec", .data = &pmi632_typec_res },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_pmic_typec_table);
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.h b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.h
new file mode 100644
index 000000000000..3c75820c9187
--- /dev/null
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Ltd. All rights reserved.
+ */
+
+#ifndef __QCOM_PMIC_TYPEC_H__
+#define __QCOM_PMIC_TYPEC_H__
+
+struct pmic_typec {
+ struct device *dev;
+ struct tcpm_port *tcpm_port;
+ struct tcpc_dev tcpc;
+ struct pmic_typec_pdphy *pmic_typec_pdphy;
+ struct pmic_typec_port *pmic_typec_port;
+
+ int (*pdphy_start)(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port);
+ void (*pdphy_stop)(struct pmic_typec *tcpm);
+
+ int (*port_start)(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port);
+ void (*port_stop)(struct pmic_typec *tcpm);
+};
+
+#define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)
+
+#endif
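With struct pmic_typec shared through this header, each backend installs its own tcpc_dev callbacks plus start/stop hooks at probe time, and a callback recovers the shared state via the tcpc_to_tcpm() container_of() wrapper. A reduced, purely illustrative sketch of that pattern (the example_* names are not part of the patch):

    /* Illustrative backend fragment showing how ops are plugged in. */
    static int example_get_vbus(struct tcpc_dev *tcpc)
    {
            struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);

            /* ... operate on tcpm->pmic_typec_port ... */
            return 0;
    }

    static int example_port_start(struct pmic_typec *tcpm,
                                  struct tcpm_port *tcpm_port)
    {
            return 0;
    }

    static void example_port_stop(struct pmic_typec *tcpm)
    {
    }

    /* Called from the backend probe, before qcom_pmic_typec_probe()
     * registers the port with tcpm_register_port().
     */
    static void example_install_ops(struct pmic_typec *tcpm)
    {
            tcpm->tcpc.get_vbus = example_get_vbus;
            tcpm->port_start = example_port_start;
            tcpm->port_stop = example_port_stop;
    }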
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
index 52c81378e36e..6560f4fc98d5 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
@@ -14,8 +14,74 @@
#include <linux/slab.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>
+#include "qcom_pmic_typec.h"
#include "qcom_pmic_typec_pdphy.h"
+/* PD PHY register offsets and bit fields */
+#define USB_PDPHY_MSG_CONFIG_REG 0x40
+#define MSG_CONFIG_PORT_DATA_ROLE BIT(3)
+#define MSG_CONFIG_PORT_POWER_ROLE BIT(2)
+#define MSG_CONFIG_SPEC_REV_MASK (BIT(1) | BIT(0))
+
+#define USB_PDPHY_EN_CONTROL_REG 0x46
+#define CONTROL_ENABLE BIT(0)
+
+#define USB_PDPHY_RX_STATUS_REG 0x4A
+#define RX_FRAME_TYPE (BIT(0) | BIT(1) | BIT(2))
+
+#define USB_PDPHY_FRAME_FILTER_REG 0x4C
+#define FRAME_FILTER_EN_HARD_RESET BIT(5)
+#define FRAME_FILTER_EN_SOP BIT(0)
+
+#define USB_PDPHY_TX_SIZE_REG 0x42
+#define TX_SIZE_MASK 0xF
+
+#define USB_PDPHY_TX_CONTROL_REG 0x44
+#define TX_CONTROL_RETRY_COUNT(n) (((n) & 0x3) << 5)
+#define TX_CONTROL_FRAME_TYPE(n) (((n) & 0x7) << 2)
+#define TX_CONTROL_FRAME_TYPE_CABLE_RESET (0x1 << 2)
+#define TX_CONTROL_SEND_SIGNAL BIT(1)
+#define TX_CONTROL_SEND_MSG BIT(0)
+
+#define USB_PDPHY_RX_SIZE_REG 0x48
+
+#define USB_PDPHY_RX_ACKNOWLEDGE_REG 0x4B
+#define RX_BUFFER_TOKEN BIT(0)
+
+#define USB_PDPHY_BIST_MODE_REG 0x4E
+#define BIST_MODE_MASK 0xF
+#define BIST_ENABLE BIT(7)
+#define PD_MSG_BIST 0x3
+#define PD_BIST_TEST_DATA_MODE 0x8
+
+#define USB_PDPHY_TX_BUFFER_HDR_REG 0x60
+#define USB_PDPHY_TX_BUFFER_DATA_REG 0x62
+
+#define USB_PDPHY_RX_BUFFER_REG 0x80
+
+/* VDD regulator */
+#define VDD_PDPHY_VOL_MIN 2800000 /* uV */
+#define VDD_PDPHY_VOL_MAX 3300000 /* uV */
+#define VDD_PDPHY_HPM_LOAD 3000 /* uA */
+
+/* Message Spec Rev field */
+#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
+
+/* timers */
+#define RECEIVER_RESPONSE_TIME 15 /* tReceiverResponse */
+#define HARD_RESET_COMPLETE_TIME 5 /* tHardResetComplete */
+
+/* Interrupt numbers */
+#define PMIC_PDPHY_SIG_TX_IRQ 0x0
+#define PMIC_PDPHY_SIG_RX_IRQ 0x1
+#define PMIC_PDPHY_MSG_TX_IRQ 0x2
+#define PMIC_PDPHY_MSG_RX_IRQ 0x3
+#define PMIC_PDPHY_MSG_TX_FAIL_IRQ 0x4
+#define PMIC_PDPHY_MSG_TX_DISCARD_IRQ 0x5
+#define PMIC_PDPHY_MSG_RX_DISCARD_IRQ 0x6
+#define PMIC_PDPHY_FR_SWAP_IRQ 0x7
+
+
struct pmic_typec_pdphy_irq_data {
int virq;
int irq;
@@ -231,11 +297,13 @@ done:
return ret;
}
-int qcom_pmic_typec_pdphy_pd_transmit(struct pmic_typec_pdphy *pmic_typec_pdphy,
- enum tcpm_transmit_type type,
- const struct pd_message *msg,
- unsigned int negotiated_rev)
+static int qcom_pmic_typec_pdphy_pd_transmit(struct tcpc_dev *tcpc,
+ enum tcpm_transmit_type type,
+ const struct pd_message *msg,
+ unsigned int negotiated_rev)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
struct device *dev = pmic_typec_pdphy->dev;
int ret;
@@ -299,7 +367,7 @@ done:
if (!ret) {
dev_vdbg(dev, "pd_receive: handing %d bytes to tcpm\n", size);
- tcpm_pd_receive(pmic_typec_pdphy->tcpm_port, &msg);
+ tcpm_pd_receive(pmic_typec_pdphy->tcpm_port, &msg, TCPC_TX_SOP);
}
}
@@ -336,8 +404,10 @@ static irqreturn_t qcom_pmic_typec_pdphy_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, bool on)
+static int qcom_pmic_typec_pdphy_set_pd_rx(struct tcpc_dev *tcpc, bool on)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
unsigned long flags;
int ret;
@@ -353,9 +423,12 @@ int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, b
return ret;
}
-int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
- bool data_role_host, bool power_role_src)
+static int qcom_pmic_typec_pdphy_set_roles(struct tcpc_dev *tcpc, bool attached,
+ enum typec_role power_role,
+ enum typec_data_role data_role)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
struct device *dev = pmic_typec_pdphy->dev;
unsigned long flags;
int ret;
@@ -366,12 +439,13 @@ int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
MSG_CONFIG_PORT_DATA_ROLE |
MSG_CONFIG_PORT_POWER_ROLE,
- data_role_host << 3 | power_role_src << 2);
+ (data_role == TYPEC_HOST ? MSG_CONFIG_PORT_DATA_ROLE : 0) |
+ (power_role == TYPEC_SOURCE ? MSG_CONFIG_PORT_POWER_ROLE : 0));
spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n",
- data_role_host, power_role_src);
+ data_role, power_role);
return ret;
}
@@ -435,9 +509,10 @@ done:
return ret;
}
-int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
- struct tcpm_port *tcpm_port)
+static int qcom_pmic_typec_pdphy_start(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port)
{
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
int i;
int ret;
@@ -457,8 +532,9 @@ int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
return 0;
}
-void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
+static void qcom_pmic_typec_pdphy_stop(struct pmic_typec *tcpm)
{
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
int i;
for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
@@ -469,21 +545,21 @@ void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
regulator_disable(pmic_typec_pdphy->vdd_pdphy);
}
-struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev)
-{
- return devm_kzalloc(dev, sizeof(struct pmic_typec_pdphy), GFP_KERNEL);
-}
-
int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
- struct pmic_typec_pdphy *pmic_typec_pdphy,
- struct pmic_typec_pdphy_resources *res,
+ struct pmic_typec *tcpm,
+ const struct pmic_typec_pdphy_resources *res,
struct regmap *regmap,
u32 base)
{
+ struct pmic_typec_pdphy *pmic_typec_pdphy;
struct device *dev = &pdev->dev;
struct pmic_typec_pdphy_irq_data *irq_data;
int i, ret, irq;
+ pmic_typec_pdphy = devm_kzalloc(dev, sizeof(*pmic_typec_pdphy), GFP_KERNEL);
+ if (!pmic_typec_pdphy)
+ return -ENOMEM;
+
if (!res->nr_irqs || res->nr_irqs > PMIC_PDPHY_MAX_IRQS)
return -EINVAL;
@@ -522,5 +598,48 @@ int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
return ret;
}
+ tcpm->pmic_typec_pdphy = pmic_typec_pdphy;
+
+ tcpm->tcpc.set_pd_rx = qcom_pmic_typec_pdphy_set_pd_rx;
+ tcpm->tcpc.set_roles = qcom_pmic_typec_pdphy_set_roles;
+ tcpm->tcpc.pd_transmit = qcom_pmic_typec_pdphy_pd_transmit;
+
+ tcpm->pdphy_start = qcom_pmic_typec_pdphy_start;
+ tcpm->pdphy_stop = qcom_pmic_typec_pdphy_stop;
+
return 0;
}
+
+const struct pmic_typec_pdphy_resources pm8150b_pdphy_res = {
+ .irq_params = {
+ {
+ .virq = PMIC_PDPHY_SIG_TX_IRQ,
+ .irq_name = "sig-tx",
+ },
+ {
+ .virq = PMIC_PDPHY_SIG_RX_IRQ,
+ .irq_name = "sig-rx",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_TX_IRQ,
+ .irq_name = "msg-tx",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_RX_IRQ,
+ .irq_name = "msg-rx",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_TX_FAIL_IRQ,
+ .irq_name = "msg-tx-failed",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_TX_DISCARD_IRQ,
+ .irq_name = "msg-tx-discarded",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_RX_DISCARD_IRQ,
+ .irq_name = "msg-rx-discarded",
+ },
+ },
+ .nr_irqs = 7,
+};
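The reworked set_roles path in this file maps the TCPM role enums directly onto the two MSG_CONFIG bits instead of shifting raw booleans into place. A standalone check of that mapping (the enum stand-ins mirror <linux/usb/typec.h>, where TYPEC_HOST and TYPEC_SOURCE are the non-zero values):

    #include <stdio.h>

    #define BIT(n)                          (1u << (n))
    #define MSG_CONFIG_PORT_DATA_ROLE       BIT(3)
    #define MSG_CONFIG_PORT_POWER_ROLE      BIT(2)

    /* Stand-ins for the kernel enums in <linux/usb/typec.h> */
    enum { TYPEC_DEVICE, TYPEC_HOST };
    enum { TYPEC_SINK, TYPEC_SOURCE };

    int main(void)
    {
            int data_role = TYPEC_HOST;
            int power_role = TYPEC_SOURCE;
            unsigned int val;

            val = (data_role == TYPEC_HOST ? MSG_CONFIG_PORT_DATA_ROLE : 0) |
                  (power_role == TYPEC_SOURCE ? MSG_CONFIG_PORT_POWER_ROLE : 0);

            printf("MSG_CONFIG value: 0x%x\n", val);    /* 0xc for host + source */
            return 0;
    }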
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h
index e67954e31b14..04dee20293cf 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h
@@ -8,74 +8,6 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/usb/tcpm.h>
-
-#define USB_PDPHY_MAX_DATA_OBJ_LEN 28
-#define USB_PDPHY_MSG_HDR_LEN 2
-
-/* PD PHY register offsets and bit fields */
-#define USB_PDPHY_MSG_CONFIG_REG 0x40
-#define MSG_CONFIG_PORT_DATA_ROLE BIT(3)
-#define MSG_CONFIG_PORT_POWER_ROLE BIT(2)
-#define MSG_CONFIG_SPEC_REV_MASK (BIT(1) | BIT(0))
-
-#define USB_PDPHY_EN_CONTROL_REG 0x46
-#define CONTROL_ENABLE BIT(0)
-
-#define USB_PDPHY_RX_STATUS_REG 0x4A
-#define RX_FRAME_TYPE (BIT(0) | BIT(1) | BIT(2))
-
-#define USB_PDPHY_FRAME_FILTER_REG 0x4C
-#define FRAME_FILTER_EN_HARD_RESET BIT(5)
-#define FRAME_FILTER_EN_SOP BIT(0)
-
-#define USB_PDPHY_TX_SIZE_REG 0x42
-#define TX_SIZE_MASK 0xF
-
-#define USB_PDPHY_TX_CONTROL_REG 0x44
-#define TX_CONTROL_RETRY_COUNT(n) (((n) & 0x3) << 5)
-#define TX_CONTROL_FRAME_TYPE(n) (((n) & 0x7) << 2)
-#define TX_CONTROL_FRAME_TYPE_CABLE_RESET (0x1 << 2)
-#define TX_CONTROL_SEND_SIGNAL BIT(1)
-#define TX_CONTROL_SEND_MSG BIT(0)
-
-#define USB_PDPHY_RX_SIZE_REG 0x48
-
-#define USB_PDPHY_RX_ACKNOWLEDGE_REG 0x4B
-#define RX_BUFFER_TOKEN BIT(0)
-
-#define USB_PDPHY_BIST_MODE_REG 0x4E
-#define BIST_MODE_MASK 0xF
-#define BIST_ENABLE BIT(7)
-#define PD_MSG_BIST 0x3
-#define PD_BIST_TEST_DATA_MODE 0x8
-
-#define USB_PDPHY_TX_BUFFER_HDR_REG 0x60
-#define USB_PDPHY_TX_BUFFER_DATA_REG 0x62
-
-#define USB_PDPHY_RX_BUFFER_REG 0x80
-
-/* VDD regulator */
-#define VDD_PDPHY_VOL_MIN 2800000 /* uV */
-#define VDD_PDPHY_VOL_MAX 3300000 /* uV */
-#define VDD_PDPHY_HPM_LOAD 3000 /* uA */
-
-/* Message Spec Rev field */
-#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
-
-/* timers */
-#define RECEIVER_RESPONSE_TIME 15 /* tReceiverResponse */
-#define HARD_RESET_COMPLETE_TIME 5 /* tHardResetComplete */
-
-/* Interrupt numbers */
-#define PMIC_PDPHY_SIG_TX_IRQ 0x0
-#define PMIC_PDPHY_SIG_RX_IRQ 0x1
-#define PMIC_PDPHY_MSG_TX_IRQ 0x2
-#define PMIC_PDPHY_MSG_RX_IRQ 0x3
-#define PMIC_PDPHY_MSG_TX_FAIL_IRQ 0x4
-#define PMIC_PDPHY_MSG_TX_DISCARD_IRQ 0x5
-#define PMIC_PDPHY_MSG_RX_DISCARD_IRQ 0x6
-#define PMIC_PDPHY_FR_SWAP_IRQ 0x7
/* Resources */
#define PMIC_PDPHY_MAX_IRQS 0x08
@@ -87,33 +19,19 @@ struct pmic_typec_pdphy_irq_params {
struct pmic_typec_pdphy_resources {
unsigned int nr_irqs;
- struct pmic_typec_pdphy_irq_params irq_params[PMIC_PDPHY_MAX_IRQS];
+ const struct pmic_typec_pdphy_irq_params irq_params[PMIC_PDPHY_MAX_IRQS];
};
/* API */
struct pmic_typec_pdphy;
-struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev);
-
+extern const struct pmic_typec_pdphy_resources pm8150b_pdphy_res;
int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
- struct pmic_typec_pdphy *pmic_typec_pdphy,
- struct pmic_typec_pdphy_resources *res,
+ struct pmic_typec *tcpm,
+ const struct pmic_typec_pdphy_resources *res,
struct regmap *regmap,
u32 base);
-
-int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
- struct tcpm_port *tcpm_port);
-
-void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy);
-
-int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
- bool power_role_src, bool data_role_host);
-
-int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, bool on);
-
-int qcom_pmic_typec_pdphy_pd_transmit(struct pmic_typec_pdphy *pmic_typec_pdphy,
- enum tcpm_transmit_type type,
- const struct pd_message *msg,
- unsigned int negotiated_rev);
+int qcom_pmic_typec_pdphy_stub_probe(struct platform_device *pdev,
+ struct pmic_typec *tcpm);
#endif /* __QCOM_PMIC_TYPEC_PDPHY_H__ */
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
new file mode 100644
index 000000000000..df79059cda67
--- /dev/null
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024, Linaro Ltd. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/usb/pd.h>
+#include <linux/usb/tcpm.h>
+#include "qcom_pmic_typec.h"
+#include "qcom_pmic_typec_pdphy.h"
+
+static int qcom_pmic_typec_pdphy_stub_pd_transmit(struct tcpc_dev *tcpc,
+ enum tcpm_transmit_type type,
+ const struct pd_message *msg,
+ unsigned int negotiated_rev)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct device *dev = tcpm->dev;
+
+ dev_dbg(dev, "pdphy_transmit: type=%d\n", type);
+
+ tcpm_pd_transmit_complete(tcpm->tcpm_port,
+ TCPC_TX_SUCCESS);
+
+ return 0;
+}
+
+static int qcom_pmic_typec_pdphy_stub_set_pd_rx(struct tcpc_dev *tcpc, bool on)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct device *dev = tcpm->dev;
+
+ dev_dbg(dev, "set_pd_rx: %s\n", on ? "on" : "off");
+
+ return 0;
+}
+
+static int qcom_pmic_typec_pdphy_stub_set_roles(struct tcpc_dev *tcpc, bool attached,
+ enum typec_role power_role,
+ enum typec_data_role data_role)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct device *dev = tcpm->dev;
+
+ dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n",
+ data_role, power_role);
+
+ return 0;
+}
+
+static int qcom_pmic_typec_pdphy_stub_start(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port)
+{
+ return 0;
+}
+
+static void qcom_pmic_typec_pdphy_stub_stop(struct pmic_typec *tcpm)
+{
+}
+
+int qcom_pmic_typec_pdphy_stub_probe(struct platform_device *pdev,
+ struct pmic_typec *tcpm)
+{
+ tcpm->tcpc.set_pd_rx = qcom_pmic_typec_pdphy_stub_set_pd_rx;
+ tcpm->tcpc.set_roles = qcom_pmic_typec_pdphy_stub_set_roles;
+ tcpm->tcpc.pd_transmit = qcom_pmic_typec_pdphy_stub_pd_transmit;
+
+ tcpm->pdphy_start = qcom_pmic_typec_pdphy_stub_start;
+ tcpm->pdphy_stop = qcom_pmic_typec_pdphy_stub_stop;
+
+ return 0;
+}
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
index a8f3f4d3a450..a747baa29784 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
@@ -16,8 +16,147 @@
#include <linux/usb/tcpm.h>
#include <linux/usb/typec_mux.h>
#include <linux/workqueue.h>
+
+#include "qcom_pmic_typec.h"
#include "qcom_pmic_typec_port.h"
+#define TYPEC_SNK_STATUS_REG 0x06
+#define DETECTED_SNK_TYPE_MASK GENMASK(6, 0)
+#define SNK_DAM_MASK GENMASK(6, 4)
+#define SNK_DAM_500MA BIT(6)
+#define SNK_DAM_1500MA BIT(5)
+#define SNK_DAM_3000MA BIT(4)
+#define SNK_RP_STD BIT(3)
+#define SNK_RP_1P5 BIT(2)
+#define SNK_RP_3P0 BIT(1)
+#define SNK_RP_SHORT BIT(0)
+
+#define TYPEC_SRC_STATUS_REG 0x08
+#define DETECTED_SRC_TYPE_MASK GENMASK(4, 0)
+#define SRC_HIGH_BATT BIT(5)
+#define SRC_DEBUG_ACCESS BIT(4)
+#define SRC_RD_OPEN BIT(3)
+#define SRC_RD_RA_VCONN BIT(2)
+#define SRC_RA_OPEN BIT(1)
+#define AUDIO_ACCESS_RA_RA BIT(0)
+
+#define TYPEC_STATE_MACHINE_STATUS_REG 0x09
+#define TYPEC_ATTACH_DETACH_STATE BIT(5)
+
+#define TYPEC_SM_STATUS_REG 0x0A
+#define TYPEC_SM_VBUS_VSAFE5V BIT(5)
+#define TYPEC_SM_VBUS_VSAFE0V BIT(6)
+#define TYPEC_SM_USBIN_LT_LV BIT(7)
+
+#define TYPEC_MISC_STATUS_REG 0x0B
+#define TYPEC_WATER_DETECTION_STATUS BIT(7)
+#define SNK_SRC_MODE BIT(6)
+#define TYPEC_VBUS_DETECT BIT(5)
+#define TYPEC_VBUS_ERROR_STATUS BIT(4)
+#define TYPEC_DEBOUNCE_DONE BIT(3)
+#define CC_ORIENTATION BIT(1)
+#define CC_ATTACHED BIT(0)
+
+#define LEGACY_CABLE_STATUS_REG 0x0D
+#define TYPEC_LEGACY_CABLE_STATUS BIT(1)
+#define TYPEC_NONCOMP_LEGACY_CABLE_STATUS BIT(0)
+
+#define TYPEC_U_USB_STATUS_REG 0x0F
+#define U_USB_GROUND_NOVBUS BIT(6)
+#define U_USB_GROUND BIT(4)
+#define U_USB_FMB1 BIT(3)
+#define U_USB_FLOAT1 BIT(2)
+#define U_USB_FMB2 BIT(1)
+#define U_USB_FLOAT2 BIT(0)
+
+#define TYPEC_MODE_CFG_REG 0x44
+#define TYPEC_TRY_MODE_MASK GENMASK(4, 3)
+#define EN_TRY_SNK BIT(4)
+#define EN_TRY_SRC BIT(3)
+#define TYPEC_POWER_ROLE_CMD_MASK GENMASK(2, 0)
+#define EN_SRC_ONLY BIT(2)
+#define EN_SNK_ONLY BIT(1)
+#define TYPEC_DISABLE_CMD BIT(0)
+
+#define TYPEC_VCONN_CONTROL_REG 0x46
+#define VCONN_EN_ORIENTATION BIT(2)
+#define VCONN_EN_VALUE BIT(1)
+#define VCONN_EN_SRC BIT(0)
+
+#define TYPEC_CCOUT_CONTROL_REG 0x48
+#define TYPEC_CCOUT_BUFFER_EN BIT(2)
+#define TYPEC_CCOUT_VALUE BIT(1)
+#define TYPEC_CCOUT_SRC BIT(0)
+
+#define DEBUG_ACCESS_SRC_CFG_REG 0x4C
+#define EN_UNORIENTED_DEBUG_ACCESS_SRC BIT(0)
+
+#define TYPE_C_CRUDE_SENSOR_CFG_REG 0x4e
+#define EN_SRC_CRUDE_SENSOR BIT(1)
+#define EN_SNK_CRUDE_SENSOR BIT(0)
+
+#define TYPEC_EXIT_STATE_CFG_REG 0x50
+#define BYPASS_VSAFE0V_DURING_ROLE_SWAP BIT(3)
+#define SEL_SRC_UPPER_REF BIT(2)
+#define USE_TPD_FOR_EXITING_ATTACHSRC BIT(1)
+#define EXIT_SNK_BASED_ON_CC BIT(0)
+
+#define TYPEC_CURRSRC_CFG_REG 0x52
+#define TYPEC_SRC_RP_SEL_330UA BIT(1)
+#define TYPEC_SRC_RP_SEL_180UA BIT(0)
+#define TYPEC_SRC_RP_SEL_80UA 0
+#define TYPEC_SRC_RP_SEL_MASK GENMASK(1, 0)
+
+#define TYPEC_INTERRUPT_EN_CFG_1_REG 0x5E
+#define TYPEC_LEGACY_CABLE_INT_EN BIT(7)
+#define TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN BIT(6)
+#define TYPEC_TRYSOURCE_DETECT_INT_EN BIT(5)
+#define TYPEC_TRYSINK_DETECT_INT_EN BIT(4)
+#define TYPEC_CCOUT_DETACH_INT_EN BIT(3)
+#define TYPEC_CCOUT_ATTACH_INT_EN BIT(2)
+#define TYPEC_VBUS_DEASSERT_INT_EN BIT(1)
+#define TYPEC_VBUS_ASSERT_INT_EN BIT(0)
+
+#define TYPEC_INTERRUPT_EN_CFG_2_REG 0x60
+#define TYPEC_SRC_BATT_HPWR_INT_EN BIT(6)
+#define MICRO_USB_STATE_CHANGE_INT_EN BIT(5)
+#define TYPEC_STATE_MACHINE_CHANGE_INT_EN BIT(4)
+#define TYPEC_DEBUG_ACCESS_DETECT_INT_EN BIT(3)
+#define TYPEC_WATER_DETECTION_INT_EN BIT(2)
+#define TYPEC_VBUS_ERROR_INT_EN BIT(1)
+#define TYPEC_DEBOUNCE_DONE_INT_EN BIT(0)
+
+#define TYPEC_DEBOUNCE_OPTION_REG 0x62
+#define REDUCE_TCCDEBOUNCE_TO_2MS BIT(2)
+
+#define TYPE_C_SBU_CFG_REG 0x6A
+#define SEL_SBU1_ISRC_VAL 0x04
+#define SEL_SBU2_ISRC_VAL 0x01
+
+#define TYPEC_U_USB_CFG_REG 0x70
+#define EN_MICRO_USB_FACTORY_MODE BIT(1)
+#define EN_MICRO_USB_MODE BIT(0)
+
+#define TYPEC_PMI632_U_USB_WATER_PROTECTION_CFG_REG 0x72
+
+#define TYPEC_U_USB_WATER_PROTECTION_CFG_REG 0x73
+#define EN_MICRO_USB_WATER_PROTECTION BIT(4)
+#define MICRO_USB_DETECTION_ON_TIME_CFG_MASK GENMASK(3, 2)
+#define MICRO_USB_DETECTION_PERIOD_CFG_MASK GENMASK(1, 0)
+
+#define TYPEC_PMI632_MICRO_USB_MODE_REG 0x73
+#define MICRO_USB_MODE_ONLY BIT(0)
+
+/* Interrupt numbers */
+#define PMIC_TYPEC_OR_RID_IRQ 0x0
+#define PMIC_TYPEC_VPD_IRQ 0x1
+#define PMIC_TYPEC_CC_STATE_IRQ 0x2
+#define PMIC_TYPEC_VCONN_OC_IRQ 0x3
+#define PMIC_TYPEC_VBUS_IRQ 0x4
+#define PMIC_TYPEC_ATTACH_DETACH_IRQ 0x5
+#define PMIC_TYPEC_LEGACY_CABLE_IRQ 0x6
+#define PMIC_TYPEC_TRY_SNK_SRC_IRQ 0x7
+
struct pmic_typec_port_irq_data {
int virq;
int irq;
@@ -33,6 +172,8 @@ struct pmic_typec_port {
struct pmic_typec_port_irq_data *irq_data;
struct regulator *vdd_vbus;
+ bool vbus_enabled;
+ struct mutex vbus_lock; /* VBUS state serialization */
int cc;
bool debouncing_cc;
@@ -131,7 +272,7 @@ done:
return IRQ_HANDLED;
}
-int qcom_pmic_typec_port_get_vbus(struct pmic_typec_port *pmic_typec_port)
+static int qcom_pmic_typec_port_vbus_detect(struct pmic_typec_port *pmic_typec_port)
{
struct device *dev = pmic_typec_port->dev;
unsigned int misc;
@@ -148,7 +289,7 @@ int qcom_pmic_typec_port_get_vbus(struct pmic_typec_port *pmic_typec_port)
return !!(misc & TYPEC_VBUS_DETECT);
}
-int qcom_pmic_typec_port_set_vbus(struct pmic_typec_port *pmic_typec_port, bool on)
+static int qcom_pmic_typec_port_vbus_toggle(struct pmic_typec_port *pmic_typec_port, bool on)
{
u32 sm_stat;
u32 val;
@@ -179,10 +320,49 @@ int qcom_pmic_typec_port_set_vbus(struct pmic_typec_port *pmic_typec_port, bool
return 0;
}
-int qcom_pmic_typec_port_get_cc(struct pmic_typec_port *pmic_typec_port,
- enum typec_cc_status *cc1,
- enum typec_cc_status *cc2)
+static int qcom_pmic_typec_port_get_vbus(struct tcpc_dev *tcpc)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
+ int ret;
+
+ mutex_lock(&pmic_typec_port->vbus_lock);
+ ret = pmic_typec_port->vbus_enabled || qcom_pmic_typec_port_vbus_detect(pmic_typec_port);
+ mutex_unlock(&pmic_typec_port->vbus_lock);
+
+ return ret;
+}
+
+static int qcom_pmic_typec_port_set_vbus(struct tcpc_dev *tcpc, bool on, bool sink)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
+ int ret = 0;
+
+ mutex_lock(&pmic_typec_port->vbus_lock);
+ if (pmic_typec_port->vbus_enabled == on)
+ goto done;
+
+ ret = qcom_pmic_typec_port_vbus_toggle(pmic_typec_port, on);
+ if (ret)
+ goto done;
+
+ pmic_typec_port->vbus_enabled = on;
+ tcpm_vbus_change(tcpm->tcpm_port);
+
+done:
+ dev_dbg(tcpm->dev, "set_vbus set: %d result %d\n", on, ret);
+ mutex_unlock(&pmic_typec_port->vbus_lock);
+
+ return ret;
+}
+
+static int qcom_pmic_typec_port_get_cc(struct tcpc_dev *tcpc,
+ enum typec_cc_status *cc1,
+ enum typec_cc_status *cc2)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
struct device *dev = pmic_typec_port->dev;
unsigned int misc, val;
bool attached;
@@ -275,9 +455,11 @@ static void qcom_pmic_set_cc_debounce(struct pmic_typec_port *pmic_typec_port)
msecs_to_jiffies(2));
}
-int qcom_pmic_typec_port_set_cc(struct pmic_typec_port *pmic_typec_port,
- enum typec_cc_status cc)
+static int qcom_pmic_typec_port_set_cc(struct tcpc_dev *tcpc,
+ enum typec_cc_status cc)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
struct device *dev = pmic_typec_port->dev;
unsigned int mode, currsrc;
unsigned int misc;
@@ -341,8 +523,17 @@ done:
return ret;
}
-int qcom_pmic_typec_port_set_vconn(struct pmic_typec_port *pmic_typec_port, bool on)
+static int qcom_pmic_typec_port_set_polarity(struct tcpc_dev *tcpc,
+ enum typec_cc_polarity pol)
+{
+ /* Polarity is set separately by phy-qcom-qmp.c */
+ return 0;
+}
+
+static int qcom_pmic_typec_port_set_vconn(struct tcpc_dev *tcpc, bool on)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
struct device *dev = pmic_typec_port->dev;
unsigned int orientation, misc, mask, value;
unsigned long flags;
@@ -377,10 +568,12 @@ done:
return ret;
}
-int qcom_pmic_typec_port_start_toggling(struct pmic_typec_port *pmic_typec_port,
- enum typec_port_type port_type,
- enum typec_cc_status cc)
+static int qcom_pmic_typec_port_start_toggling(struct tcpc_dev *tcpc,
+ enum typec_port_type port_type,
+ enum typec_cc_status cc)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
struct device *dev = pmic_typec_port->dev;
unsigned int misc;
u8 mode = 0;
@@ -441,9 +634,10 @@ done:
(TYPEC_STATE_MACHINE_CHANGE_INT_EN | TYPEC_VBUS_ERROR_INT_EN | \
TYPEC_DEBOUNCE_DONE_INT_EN)
-int qcom_pmic_typec_port_start(struct pmic_typec_port *pmic_typec_port,
- struct tcpm_port *tcpm_port)
+static int qcom_pmic_typec_port_start(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port)
{
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
int i;
int mask;
int ret;
@@ -491,29 +685,30 @@ done:
return ret;
}
-void qcom_pmic_typec_port_stop(struct pmic_typec_port *pmic_typec_port)
+static void qcom_pmic_typec_port_stop(struct pmic_typec *tcpm)
{
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
int i;
for (i = 0; i < pmic_typec_port->nr_irqs; i++)
disable_irq(pmic_typec_port->irq_data[i].irq);
}
-struct pmic_typec_port *qcom_pmic_typec_port_alloc(struct device *dev)
-{
- return devm_kzalloc(dev, sizeof(struct pmic_typec_port), GFP_KERNEL);
-}
-
int qcom_pmic_typec_port_probe(struct platform_device *pdev,
- struct pmic_typec_port *pmic_typec_port,
- struct pmic_typec_port_resources *res,
+ struct pmic_typec *tcpm,
+ const struct pmic_typec_port_resources *res,
struct regmap *regmap,
u32 base)
{
struct device *dev = &pdev->dev;
struct pmic_typec_port_irq_data *irq_data;
+ struct pmic_typec_port *pmic_typec_port;
int i, ret, irq;
+ pmic_typec_port = devm_kzalloc(dev, sizeof(*pmic_typec_port), GFP_KERNEL);
+ if (!pmic_typec_port)
+ return -ENOMEM;
+
if (!res->nr_irqs || res->nr_irqs > PMIC_TYPEC_MAX_IRQS)
return -EINVAL;
@@ -522,6 +717,8 @@ int qcom_pmic_typec_port_probe(struct platform_device *pdev,
if (!irq_data)
return -ENOMEM;
+ mutex_init(&pmic_typec_port->vbus_lock);
+
pmic_typec_port->vdd_vbus = devm_regulator_get(dev, "vdd-vbus");
if (IS_ERR(pmic_typec_port->vdd_vbus))
return PTR_ERR(pmic_typec_port->vdd_vbus);
@@ -556,5 +753,56 @@ int qcom_pmic_typec_port_probe(struct platform_device *pdev,
return ret;
}
+ tcpm->pmic_typec_port = pmic_typec_port;
+
+ tcpm->tcpc.get_vbus = qcom_pmic_typec_port_get_vbus;
+ tcpm->tcpc.set_vbus = qcom_pmic_typec_port_set_vbus;
+ tcpm->tcpc.set_cc = qcom_pmic_typec_port_set_cc;
+ tcpm->tcpc.get_cc = qcom_pmic_typec_port_get_cc;
+ tcpm->tcpc.set_polarity = qcom_pmic_typec_port_set_polarity;
+ tcpm->tcpc.set_vconn = qcom_pmic_typec_port_set_vconn;
+ tcpm->tcpc.start_toggling = qcom_pmic_typec_port_start_toggling;
+
+ tcpm->port_start = qcom_pmic_typec_port_start;
+ tcpm->port_stop = qcom_pmic_typec_port_stop;
+
return 0;
}
+
+const struct pmic_typec_port_resources pm8150b_port_res = {
+ .irq_params = {
+ {
+ .irq_name = "vpd-detect",
+ .virq = PMIC_TYPEC_VPD_IRQ,
+ },
+
+ {
+ .irq_name = "cc-state-change",
+ .virq = PMIC_TYPEC_CC_STATE_IRQ,
+ },
+ {
+ .irq_name = "vconn-oc",
+ .virq = PMIC_TYPEC_VCONN_OC_IRQ,
+ },
+
+ {
+ .irq_name = "vbus-change",
+ .virq = PMIC_TYPEC_VBUS_IRQ,
+ },
+
+ {
+ .irq_name = "attach-detach",
+ .virq = PMIC_TYPEC_ATTACH_DETACH_IRQ,
+ },
+ {
+ .irq_name = "legacy-cable-detect",
+ .virq = PMIC_TYPEC_LEGACY_CABLE_IRQ,
+ },
+
+ {
+ .irq_name = "try-snk-src-detect",
+ .virq = PMIC_TYPEC_TRY_SNK_SRC_IRQ,
+ },
+ },
+ .nr_irqs = 7,
+};
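Much of the port backend above reduces to decoding single status bytes against the masks defined at the top of the file. A standalone decode of a hypothetical TYPEC_MISC_STATUS_REG value, reusing the same bit definitions:

    #include <stdio.h>

    #define BIT(n)                  (1u << (n))
    #define SNK_SRC_MODE            BIT(6)
    #define TYPEC_VBUS_DETECT       BIT(5)
    #define TYPEC_DEBOUNCE_DONE     BIT(3)
    #define CC_ORIENTATION          BIT(1)
    #define CC_ATTACHED             BIT(0)

    int main(void)
    {
            unsigned int misc = 0x2b;       /* hypothetical register value */

            printf("attached=%d flipped=%d vbus=%d debounce_done=%d src_mode=%d\n",
                   !!(misc & CC_ATTACHED),
                   !!(misc & CC_ORIENTATION),
                   !!(misc & TYPEC_VBUS_DETECT),
                   !!(misc & TYPEC_DEBOUNCE_DONE),
                   !!(misc & SNK_SRC_MODE));
            return 0;
    }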
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h
index d4d358c680b6..2ca83a46cf3b 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h
@@ -3,149 +3,12 @@
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023, Linaro Ltd. All rights reserved.
*/
-#ifndef __QCOM_PMIC_TYPEC_H__
-#define __QCOM_PMIC_TYPEC_H__
+#ifndef __QCOM_PMIC_TYPEC_PORT_H__
+#define __QCOM_PMIC_TYPEC_PORT_H__
#include <linux/platform_device.h>
#include <linux/usb/tcpm.h>
-#define TYPEC_SNK_STATUS_REG 0x06
-#define DETECTED_SNK_TYPE_MASK GENMASK(6, 0)
-#define SNK_DAM_MASK GENMASK(6, 4)
-#define SNK_DAM_500MA BIT(6)
-#define SNK_DAM_1500MA BIT(5)
-#define SNK_DAM_3000MA BIT(4)
-#define SNK_RP_STD BIT(3)
-#define SNK_RP_1P5 BIT(2)
-#define SNK_RP_3P0 BIT(1)
-#define SNK_RP_SHORT BIT(0)
-
-#define TYPEC_SRC_STATUS_REG 0x08
-#define DETECTED_SRC_TYPE_MASK GENMASK(4, 0)
-#define SRC_HIGH_BATT BIT(5)
-#define SRC_DEBUG_ACCESS BIT(4)
-#define SRC_RD_OPEN BIT(3)
-#define SRC_RD_RA_VCONN BIT(2)
-#define SRC_RA_OPEN BIT(1)
-#define AUDIO_ACCESS_RA_RA BIT(0)
-
-#define TYPEC_STATE_MACHINE_STATUS_REG 0x09
-#define TYPEC_ATTACH_DETACH_STATE BIT(5)
-
-#define TYPEC_SM_STATUS_REG 0x0A
-#define TYPEC_SM_VBUS_VSAFE5V BIT(5)
-#define TYPEC_SM_VBUS_VSAFE0V BIT(6)
-#define TYPEC_SM_USBIN_LT_LV BIT(7)
-
-#define TYPEC_MISC_STATUS_REG 0x0B
-#define TYPEC_WATER_DETECTION_STATUS BIT(7)
-#define SNK_SRC_MODE BIT(6)
-#define TYPEC_VBUS_DETECT BIT(5)
-#define TYPEC_VBUS_ERROR_STATUS BIT(4)
-#define TYPEC_DEBOUNCE_DONE BIT(3)
-#define CC_ORIENTATION BIT(1)
-#define CC_ATTACHED BIT(0)
-
-#define LEGACY_CABLE_STATUS_REG 0x0D
-#define TYPEC_LEGACY_CABLE_STATUS BIT(1)
-#define TYPEC_NONCOMP_LEGACY_CABLE_STATUS BIT(0)
-
-#define TYPEC_U_USB_STATUS_REG 0x0F
-#define U_USB_GROUND_NOVBUS BIT(6)
-#define U_USB_GROUND BIT(4)
-#define U_USB_FMB1 BIT(3)
-#define U_USB_FLOAT1 BIT(2)
-#define U_USB_FMB2 BIT(1)
-#define U_USB_FLOAT2 BIT(0)
-
-#define TYPEC_MODE_CFG_REG 0x44
-#define TYPEC_TRY_MODE_MASK GENMASK(4, 3)
-#define EN_TRY_SNK BIT(4)
-#define EN_TRY_SRC BIT(3)
-#define TYPEC_POWER_ROLE_CMD_MASK GENMASK(2, 0)
-#define EN_SRC_ONLY BIT(2)
-#define EN_SNK_ONLY BIT(1)
-#define TYPEC_DISABLE_CMD BIT(0)
-
-#define TYPEC_VCONN_CONTROL_REG 0x46
-#define VCONN_EN_ORIENTATION BIT(2)
-#define VCONN_EN_VALUE BIT(1)
-#define VCONN_EN_SRC BIT(0)
-
-#define TYPEC_CCOUT_CONTROL_REG 0x48
-#define TYPEC_CCOUT_BUFFER_EN BIT(2)
-#define TYPEC_CCOUT_VALUE BIT(1)
-#define TYPEC_CCOUT_SRC BIT(0)
-
-#define DEBUG_ACCESS_SRC_CFG_REG 0x4C
-#define EN_UNORIENTED_DEBUG_ACCESS_SRC BIT(0)
-
-#define TYPE_C_CRUDE_SENSOR_CFG_REG 0x4e
-#define EN_SRC_CRUDE_SENSOR BIT(1)
-#define EN_SNK_CRUDE_SENSOR BIT(0)
-
-#define TYPEC_EXIT_STATE_CFG_REG 0x50
-#define BYPASS_VSAFE0V_DURING_ROLE_SWAP BIT(3)
-#define SEL_SRC_UPPER_REF BIT(2)
-#define USE_TPD_FOR_EXITING_ATTACHSRC BIT(1)
-#define EXIT_SNK_BASED_ON_CC BIT(0)
-
-#define TYPEC_CURRSRC_CFG_REG 0x52
-#define TYPEC_SRC_RP_SEL_330UA BIT(1)
-#define TYPEC_SRC_RP_SEL_180UA BIT(0)
-#define TYPEC_SRC_RP_SEL_80UA 0
-#define TYPEC_SRC_RP_SEL_MASK GENMASK(1, 0)
-
-#define TYPEC_INTERRUPT_EN_CFG_1_REG 0x5E
-#define TYPEC_LEGACY_CABLE_INT_EN BIT(7)
-#define TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN BIT(6)
-#define TYPEC_TRYSOURCE_DETECT_INT_EN BIT(5)
-#define TYPEC_TRYSINK_DETECT_INT_EN BIT(4)
-#define TYPEC_CCOUT_DETACH_INT_EN BIT(3)
-#define TYPEC_CCOUT_ATTACH_INT_EN BIT(2)
-#define TYPEC_VBUS_DEASSERT_INT_EN BIT(1)
-#define TYPEC_VBUS_ASSERT_INT_EN BIT(0)
-
-#define TYPEC_INTERRUPT_EN_CFG_2_REG 0x60
-#define TYPEC_SRC_BATT_HPWR_INT_EN BIT(6)
-#define MICRO_USB_STATE_CHANGE_INT_EN BIT(5)
-#define TYPEC_STATE_MACHINE_CHANGE_INT_EN BIT(4)
-#define TYPEC_DEBUG_ACCESS_DETECT_INT_EN BIT(3)
-#define TYPEC_WATER_DETECTION_INT_EN BIT(2)
-#define TYPEC_VBUS_ERROR_INT_EN BIT(1)
-#define TYPEC_DEBOUNCE_DONE_INT_EN BIT(0)
-
-#define TYPEC_DEBOUNCE_OPTION_REG 0x62
-#define REDUCE_TCCDEBOUNCE_TO_2MS BIT(2)
-
-#define TYPE_C_SBU_CFG_REG 0x6A
-#define SEL_SBU1_ISRC_VAL 0x04
-#define SEL_SBU2_ISRC_VAL 0x01
-
-#define TYPEC_U_USB_CFG_REG 0x70
-#define EN_MICRO_USB_FACTORY_MODE BIT(1)
-#define EN_MICRO_USB_MODE BIT(0)
-
-#define TYPEC_PMI632_U_USB_WATER_PROTECTION_CFG_REG 0x72
-
-#define TYPEC_U_USB_WATER_PROTECTION_CFG_REG 0x73
-#define EN_MICRO_USB_WATER_PROTECTION BIT(4)
-#define MICRO_USB_DETECTION_ON_TIME_CFG_MASK GENMASK(3, 2)
-#define MICRO_USB_DETECTION_PERIOD_CFG_MASK GENMASK(1, 0)
-
-#define TYPEC_PMI632_MICRO_USB_MODE_REG 0x73
-#define MICRO_USB_MODE_ONLY BIT(0)
-
-/* Interrupt numbers */
-#define PMIC_TYPEC_OR_RID_IRQ 0x0
-#define PMIC_TYPEC_VPD_IRQ 0x1
-#define PMIC_TYPEC_CC_STATE_IRQ 0x2
-#define PMIC_TYPEC_VCONN_OC_IRQ 0x3
-#define PMIC_TYPEC_VBUS_IRQ 0x4
-#define PMIC_TYPEC_ATTACH_DETACH_IRQ 0x5
-#define PMIC_TYPEC_LEGACY_CABLE_IRQ 0x6
-#define PMIC_TYPEC_TRY_SNK_SRC_IRQ 0x7
-
/* Resources */
#define PMIC_TYPEC_MAX_IRQS 0x08
@@ -156,40 +19,17 @@ struct pmic_typec_port_irq_params {
struct pmic_typec_port_resources {
unsigned int nr_irqs;
- struct pmic_typec_port_irq_params irq_params[PMIC_TYPEC_MAX_IRQS];
+ const struct pmic_typec_port_irq_params irq_params[PMIC_TYPEC_MAX_IRQS];
};
/* API */
-struct pmic_typec;
-struct pmic_typec_port *qcom_pmic_typec_port_alloc(struct device *dev);
+extern const struct pmic_typec_port_resources pm8150b_port_res;
int qcom_pmic_typec_port_probe(struct platform_device *pdev,
- struct pmic_typec_port *pmic_typec_port,
- struct pmic_typec_port_resources *res,
+ struct pmic_typec *tcpm,
+ const struct pmic_typec_port_resources *res,
struct regmap *regmap,
u32 base);
-int qcom_pmic_typec_port_start(struct pmic_typec_port *pmic_typec_port,
- struct tcpm_port *tcpm_port);
-
-void qcom_pmic_typec_port_stop(struct pmic_typec_port *pmic_typec_port);
-
-int qcom_pmic_typec_port_get_cc(struct pmic_typec_port *pmic_typec_port,
- enum typec_cc_status *cc1,
- enum typec_cc_status *cc2);
-
-int qcom_pmic_typec_port_set_cc(struct pmic_typec_port *pmic_typec_port,
- enum typec_cc_status cc);
-
-int qcom_pmic_typec_port_get_vbus(struct pmic_typec_port *pmic_typec_port);
-
-int qcom_pmic_typec_port_set_vconn(struct pmic_typec_port *pmic_typec_port, bool on);
-
-int qcom_pmic_typec_port_start_toggling(struct pmic_typec_port *pmic_typec_port,
- enum typec_port_type port_type,
- enum typec_cc_status cc);
-
-int qcom_pmic_typec_port_set_vbus(struct pmic_typec_port *pmic_typec_port, bool on);
-
#endif /* __QCOM_PMIC_TYPE_C_PORT_H__ */
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 0ee3e6e29bb1..c962014bba4e 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -445,8 +445,11 @@ static int tcpci_set_pd_rx(struct tcpc_dev *tcpc, bool enable)
unsigned int reg = 0;
int ret;
- if (enable)
+ if (enable) {
reg = TCPC_RX_DETECT_SOP | TCPC_RX_DETECT_HARD_RESET;
+ if (tcpci->data->cable_comm_capable)
+ reg |= TCPC_RX_DETECT_SOP1;
+ }
ret = regmap_write(tcpci->regmap, TCPC_RX_DETECT, reg);
if (ret < 0)
return ret;
@@ -584,6 +587,23 @@ static int tcpci_pd_transmit(struct tcpc_dev *tcpc, enum tcpm_transmit_type type
return 0;
}
+static bool tcpci_cable_comm_capable(struct tcpc_dev *tcpc)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+
+ return tcpci->data->cable_comm_capable;
+}
+
+static bool tcpci_attempt_vconn_swap_discovery(struct tcpc_dev *tcpc)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+
+ if (tcpci->data->attempt_vconn_swap_discovery)
+ return tcpci->data->attempt_vconn_swap_discovery(tcpci, tcpci->data);
+
+ return false;
+}
+
static int tcpci_init(struct tcpc_dev *tcpc)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
@@ -712,7 +732,7 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
/* Read complete, clear RX status alert bit */
tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
- tcpm_pd_receive(tcpci->port, &msg);
+ tcpm_pd_receive(tcpci->port, &msg, TCPC_TX_SOP);
}
if (tcpci->data->vbus_vsafe0v && (status & TCPC_ALERT_EXTENDED_STATUS)) {
@@ -793,6 +813,8 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
tcpci->tcpc.enable_frs = tcpci_enable_frs;
tcpci->tcpc.frs_sourcing_vbus = tcpci_frs_sourcing_vbus;
tcpci->tcpc.set_partner_usb_comm_capable = tcpci_set_partner_usb_comm_capable;
+ tcpci->tcpc.cable_comm_capable = tcpci_cable_comm_capable;
+ tcpci->tcpc.attempt_vconn_swap_discovery = tcpci_attempt_vconn_swap_discovery;
if (tcpci->data->check_contaminant)
tcpci->tcpc.check_contaminant = tcpci_check_contaminant;
@@ -889,6 +911,7 @@ MODULE_DEVICE_TABLE(i2c, tcpci_id);
#ifdef CONFIG_OF
static const struct of_device_id tcpci_of_match[] = {
{ .compatible = "nxp,ptn5110", },
+ { .compatible = "tcpci", },
{},
};
MODULE_DEVICE_TABLE(of, tcpci_of_match);
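SOP' traffic only reaches TCPM through the generic TCPCI path if the glue driver opts in before registering the port: data->cable_comm_capable gates both the extra RX_DETECT bit set above and the new cable_comm_capable callback, while attempt_vconn_swap_discovery lets the glue veto the Vconn swap that cable discovery may require. A minimal, illustrative fragment modelled on the Maxim glue further below (the my_* names and the omitted regmap setup are assumptions):

    struct my_chip {
            struct tcpci_data data;
            struct tcpci *tcpci;
    };

    static bool my_attempt_vconn_swap_discovery(struct tcpci *tcpci,
                                                struct tcpci_data *data)
    {
            /* Allow TCPM to Vconn-swap for cable discovery by default. */
            return true;
    }

    static int my_tcpc_probe(struct i2c_client *client)
    {
            struct my_chip *chip;

            chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
            if (!chip)
                    return -ENOMEM;

            /* regmap and alert IRQ setup omitted for brevity */
            chip->data.cable_comm_capable = true;
            chip->data.attempt_vconn_swap_discovery = my_attempt_vconn_swap_discovery;

            chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
            return PTR_ERR_OR_ZERO(chip->tcpci);
    }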
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.h b/drivers/usb/typec/tcpm/tcpci_maxim.h
index 2c1c4d161b0d..78ff3b73ee7e 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.h
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.h
@@ -62,6 +62,7 @@ struct max_tcpci_chip {
struct i2c_client *client;
struct tcpm_port *port;
enum contamiant_state contaminant_state;
+ bool veto_vconn_swap;
};
static inline int max_tcpci_read16(struct max_tcpci_chip *chip, unsigned int reg, u16 *val)
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim_core.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
index 7fb966fd639b..eec3bcec119c 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim_core.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
@@ -128,6 +128,7 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
u8 count, frame_type, rx_buf[TCPC_RECEIVE_BUFFER_LEN];
int ret, payload_index;
u8 *rx_buf_ptr;
+ enum tcpm_transmit_type rx_type;
/*
* READABLE_BYTE_COUNT: Indicates the number of bytes in the RX_BUF_BYTE_x registers
@@ -143,10 +144,23 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
count = rx_buf[TCPC_RECEIVE_BUFFER_COUNT_OFFSET];
frame_type = rx_buf[TCPC_RECEIVE_BUFFER_FRAME_TYPE_OFFSET];
- if (count == 0 || frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP) {
+ switch (frame_type) {
+ case TCPC_RX_BUF_FRAME_TYPE_SOP1:
+ rx_type = TCPC_TX_SOP_PRIME;
+ break;
+ case TCPC_RX_BUF_FRAME_TYPE_SOP:
+ rx_type = TCPC_TX_SOP;
+ break;
+ default:
+ rx_type = TCPC_TX_SOP;
+ break;
+ }
+
+ if (count == 0 || (frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP &&
+ frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP1)) {
max_tcpci_write16(chip, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
dev_err(chip->dev, "%s\n", count == 0 ? "error: count is 0" :
- "error frame_type is not SOP");
+ "error frame_type is not SOP/SOP'");
return;
}
@@ -183,7 +197,7 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
if (ret < 0)
return;
- tcpm_pd_receive(chip->port, &msg);
+ tcpm_pd_receive(chip->port, &msg, rx_type);
}
static int max_tcpci_set_vbus(struct tcpci *tcpci, struct tcpci_data *tdata, bool source, bool sink)
@@ -309,8 +323,10 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
if (ret < 0)
return ret;
- if (reg_status & TCPC_FAULT_STATUS_VCONN_OC)
+ if (reg_status & TCPC_FAULT_STATUS_VCONN_OC) {
+ chip->veto_vconn_swap = true;
tcpm_port_error_recovery(chip->port);
+ }
}
if (status & TCPC_ALERT_EXTND) {
@@ -444,6 +460,18 @@ static void max_tcpci_check_contaminant(struct tcpci *tcpci, struct tcpci_data *
tcpm_port_clean(chip->port);
}
+static bool max_tcpci_attempt_vconn_swap_discovery(struct tcpci *tcpci, struct tcpci_data *tdata)
+{
+ struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata);
+
+ if (chip->veto_vconn_swap) {
+ chip->veto_vconn_swap = false;
+ return false;
+ }
+
+ return true;
+}
+
static int max_tcpci_probe(struct i2c_client *client)
{
int ret;
@@ -478,6 +506,8 @@ static int max_tcpci_probe(struct i2c_client *client)
chip->data.vbus_vsafe0v = true;
chip->data.set_partner_usb_comm_capable = max_tcpci_set_partner_usb_comm_capable;
chip->data.check_contaminant = max_tcpci_check_contaminant;
+ chip->data.cable_comm_capable = true;
+ chip->data.attempt_vconn_swap_discovery = max_tcpci_attempt_vconn_swap_discovery;
max_tcpci_init_regs(chip);
chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
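The tcpm.c changes below introduce negotiated_rev_prime and spell out how the SOP' revision relates to the one negotiated with the partner: the port never addresses the cable above the cable's own revision, nor above the SOP revision. Reduced to code, the selection amounts to taking the minimum (the helper name is illustrative, not part of the patch):

    /* Illustrative helper: revision to use on SOP' given the revision
     * negotiated on SOP and the revision reported by the cable plug.
     */
    static unsigned int sop_prime_rev(unsigned int sop_rev, unsigned int cable_rev)
    {
            return cable_rev < sop_rev ? cable_rev : sop_rev;
    }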
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 096597231027..ae2b6c94482d 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -108,6 +108,7 @@
S(VCONN_SWAP_WAIT_FOR_VCONN), \
S(VCONN_SWAP_TURN_ON_VCONN), \
S(VCONN_SWAP_TURN_OFF_VCONN), \
+ S(VCONN_SWAP_SEND_SOFT_RESET), \
\
S(FR_SWAP_SEND), \
S(FR_SWAP_SEND_TIMEOUT), \
@@ -145,7 +146,9 @@
S(PORT_RESET_WAIT_OFF), \
\
S(AMS_START), \
- S(CHUNK_NOT_SUPP)
+ S(CHUNK_NOT_SUPP), \
+ \
+ S(SRC_VDM_IDENTITY_REQUEST)
#define FOREACH_AMS(S) \
S(NONE_AMS), \
@@ -327,6 +330,12 @@ struct tcpm_port {
struct typec_partner_desc partner_desc;
struct typec_partner *partner;
+ struct usb_pd_identity cable_ident;
+ struct typec_cable_desc cable_desc;
+ struct typec_cable *cable;
+ struct typec_plug_desc plug_prime_desc;
+ struct typec_plug *plug_prime;
+
enum typec_cc_status cc_req;
enum typec_cc_status src_rp; /* work only if pd_supported == false */
@@ -468,7 +477,9 @@ struct tcpm_port {
/* Alternate mode data */
struct pd_mode_data mode_data;
+ struct pd_mode_data mode_data_prime;
struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
+ struct typec_altmode *plug_prime_altmode[ALTMODE_DISCOVERY_MAX];
struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
/* Deadline in jiffies to exit src_try_wait state */
@@ -505,6 +516,41 @@ struct tcpm_port {
* transitions.
*/
bool potential_contaminant;
+
+ /* SOP* Related Fields */
+ /*
+ * Flag to determine if SOP' Discover Identity is available. The flag
+ * is set if Discover Identity on SOP' does not immediately follow
+ * Discover Identity on SOP.
+ */
+ bool send_discover_prime;
+ /*
+ * tx_sop_type determines which SOP* a message is being sent on.
+ * For messages that are queued and not sent immediately such as in
+ * tcpm_queue_message or messages that send after state changes,
+ * the tx_sop_type is set accordingly.
+ */
+ enum tcpm_transmit_type tx_sop_type;
+ /*
+ * Prior to discovering the port partner's Specification Revision, the
+ * Vconn source and cable plug will use the lower of their two revisions.
+ *
+ * When the port partner's Specification Revision is discovered, the following
+ * rules are put in place.
+ * 1. If the cable revision (1) is lower than the revision negotiated
+ * between the port and partner (2), the port and partner will communicate
+ * on revision (2), but the port and cable will communicate on revision (1).
+ * 2. If the cable revision (1) is higher than the revision negotiated
+ * between the port and partner (2), the port and partner will communicate
+ * on revision (2), and the port and cable will communicate on revision (2)
+ * as well.
+ */
+ unsigned int negotiated_rev_prime;
+ /*
+	 * Each SOP* type must maintain its own tx and rx message IDs
+ */
+ unsigned int message_id_prime;
+ unsigned int rx_msgid_prime;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
struct mutex logbuffer_lock; /* log buffer access lock */
@@ -518,6 +564,7 @@ struct pd_rx_event {
struct kthread_work work;
struct tcpm_port *port;
struct pd_message msg;
+ enum tcpm_transmit_type rx_sop_type;
};
static const char * const pd_rev[] = {
@@ -893,19 +940,30 @@ static void tcpm_ams_finish(struct tcpm_port *port)
}
static int tcpm_pd_transmit(struct tcpm_port *port,
- enum tcpm_transmit_type type,
+ enum tcpm_transmit_type tx_sop_type,
const struct pd_message *msg)
{
unsigned long timeout;
int ret;
+ unsigned int negotiated_rev;
+
+ switch (tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ negotiated_rev = port->negotiated_rev_prime;
+ break;
+ case TCPC_TX_SOP:
+ default:
+ negotiated_rev = port->negotiated_rev;
+ break;
+ }
if (msg)
tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
else
- tcpm_log(port, "PD TX, type: %#x", type);
+ tcpm_log(port, "PD TX, type: %#x", tx_sop_type);
reinit_completion(&port->tx_complete);
- ret = port->tcpc->pd_transmit(port->tcpc, type, msg, port->negotiated_rev);
+ ret = port->tcpc->pd_transmit(port->tcpc, tx_sop_type, msg, negotiated_rev);
if (ret < 0)
return ret;
@@ -918,7 +976,17 @@ static int tcpm_pd_transmit(struct tcpm_port *port,
switch (port->tx_status) {
case TCPC_TX_SUCCESS:
- port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
+ switch (tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ port->message_id_prime = (port->message_id_prime + 1) &
+ PD_HEADER_ID_MASK;
+ break;
+ case TCPC_TX_SOP:
+ default:
+ port->message_id = (port->message_id + 1) &
+ PD_HEADER_ID_MASK;
+ break;
+ }
/*
* USB PD rev 2.0, 8.3.2.2.1:
* USB PD rev 3.0, 8.3.2.1.3:
@@ -1099,6 +1167,12 @@ static int tcpm_set_roles(struct tcpm_port *port, bool attached,
if (ret < 0)
return ret;
+ if (port->tcpc->set_orientation) {
+ ret = port->tcpc->set_orientation(port->tcpc, orientation);
+ if (ret < 0)
+ return ret;
+ }
+
port->pwr_role = role;
port->data_role = data;
typec_set_data_role(port->typec_port, data);
@@ -1456,7 +1530,7 @@ static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
* VDM/VDO handling functions
*/
static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
- const u32 *data, int cnt)
+ const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
{
u32 vdo_hdr = port->vdo_data[0];
@@ -1464,7 +1538,10 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
/* If is sending discover_identity, handle received message first */
if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
- port->send_discover = true;
+ if (tx_sop_type == TCPC_TX_SOP_PRIME)
+ port->send_discover_prime = true;
+ else
+ port->send_discover = true;
mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
} else {
/* Make sure we are not still processing a previous VDM packet */
@@ -1479,14 +1556,16 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
port->vdm_state = VDM_STATE_READY;
port->vdm_sm_running = true;
+ port->tx_sop_type = tx_sop_type;
+
mod_vdm_delayed_work(port, 0);
}
static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
- const u32 *data, int cnt)
+ const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
{
mutex_lock(&port->lock);
- tcpm_queue_vdm(port, header, data, cnt);
+ tcpm_queue_vdm(port, header, data, cnt, tx_sop_type);
mutex_unlock(&port->lock);
}
@@ -1508,9 +1587,68 @@ static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
PD_PRODUCT_PID(product), product & 0xffff);
}
-static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt)
+static void svdm_consume_identity_sop_prime(struct tcpm_port *port, const u32 *p, int cnt)
{
- struct pd_mode_data *pmdata = &port->mode_data;
+ u32 idh = p[VDO_INDEX_IDH];
+ u32 product = p[VDO_INDEX_PRODUCT];
+ int svdm_version;
+
+ /*
+ * Attempt to consume identity only if cable currently is not set
+ */
+ if (!IS_ERR_OR_NULL(port->cable))
+ goto register_plug;
+
+ /* Reset cable identity */
+ memset(&port->cable_ident, 0, sizeof(port->cable_ident));
+
+ /* Fill out id header, cert, product, cable VDO 1 */
+ port->cable_ident.id_header = idh;
+ port->cable_ident.cert_stat = p[VDO_INDEX_CSTAT];
+ port->cable_ident.product = product;
+ port->cable_ident.vdo[0] = p[VDO_INDEX_CABLE_1];
+
+ /* Fill out cable desc, infer svdm_version from pd revision */
+ port->cable_desc.type = (enum typec_plug_type) (VDO_TYPEC_CABLE_TYPE(p[VDO_INDEX_CABLE_1]) +
+ USB_PLUG_TYPE_A);
+ port->cable_desc.active = PD_IDH_PTYPE(idh) == IDH_PTYPE_ACABLE ? 1 : 0;
+ /* Log PD Revision and additional cable VDO from negotiated revision */
+ switch (port->negotiated_rev_prime) {
+ case PD_REV30:
+ port->cable_desc.pd_revision = 0x0300;
+ if (port->cable_desc.active)
+ port->cable_ident.vdo[1] = p[VDO_INDEX_CABLE_2];
+ break;
+ case PD_REV20:
+ port->cable_desc.pd_revision = 0x0200;
+ break;
+ default:
+ port->cable_desc.pd_revision = 0x0200;
+ break;
+ }
+ port->cable_desc.identity = &port->cable_ident;
+ /* Register Cable, set identity and svdm_version */
+ port->cable = typec_register_cable(port->typec_port, &port->cable_desc);
+ if (IS_ERR_OR_NULL(port->cable))
+ return;
+ typec_cable_set_identity(port->cable);
+ /* Get SVDM version */
+ svdm_version = PD_VDO_SVDM_VER(p[VDO_INDEX_HDR]);
+ typec_cable_set_svdm_version(port->cable, svdm_version);
+
+register_plug:
+ if (IS_ERR_OR_NULL(port->plug_prime)) {
+ port->plug_prime_desc.index = TYPEC_PLUG_SOP_P;
+ port->plug_prime = typec_register_plug(port->cable,
+ &port->plug_prime_desc);
+ }
+}
+
+static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt,
+ enum tcpm_transmit_type rx_sop_type)
+{
+ struct pd_mode_data *pmdata = rx_sop_type == TCPC_TX_SOP_PRIME ?
+ &port->mode_data_prime : &port->mode_data;
int i;
for (i = 1; i < cnt; i++) {
@@ -1556,14 +1694,29 @@ abort:
return false;
}
-static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt)
+static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt,
+ enum tcpm_transmit_type rx_sop_type)
{
struct pd_mode_data *pmdata = &port->mode_data;
struct typec_altmode_desc *paltmode;
int i;
- if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
- /* Already logged in svdm_consume_svids() */
+ switch (rx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ pmdata = &port->mode_data_prime;
+ if (pmdata->altmodes >= ARRAY_SIZE(port->plug_prime_altmode)) {
+ /* Already logged in svdm_consume_svids() */
+ return;
+ }
+ break;
+ case TCPC_TX_SOP:
+ pmdata = &port->mode_data;
+ if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
+ /* Already logged in svdm_consume_svids() */
+ return;
+ }
+ break;
+ default:
return;
}
@@ -1601,20 +1754,129 @@ static void tcpm_register_partner_altmodes(struct tcpm_port *port)
}
}
+static void tcpm_register_plug_altmodes(struct tcpm_port *port)
+{
+ struct pd_mode_data *modep = &port->mode_data_prime;
+ struct typec_altmode *altmode;
+ int i;
+
+ typec_plug_set_num_altmodes(port->plug_prime, modep->altmodes);
+
+ for (i = 0; i < modep->altmodes; i++) {
+ altmode = typec_plug_register_altmode(port->plug_prime,
+ &modep->altmode_desc[i]);
+ if (IS_ERR(altmode)) {
+ tcpm_log(port, "Failed to register plug SVID 0x%04x",
+ modep->altmode_desc[i].svid);
+ altmode = NULL;
+ }
+ port->plug_prime_altmode[i] = altmode;
+ }
+}
+
#define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
+#define supports_modal_cable(port) PD_IDH_MODAL_SUPP((port)->cable_ident.id_header)
+#define supports_host(port) PD_IDH_HOST_SUPP((port)->partner_ident.id_header)
+
+/*
+ * Helper to determine whether the port is capable of SOP' communication at the
+ * current point in time.
+ */
+static bool tcpm_can_communicate_sop_prime(struct tcpm_port *port)
+{
+ /* Check to see if tcpc supports SOP' communication */
+ if (!port->tcpc->cable_comm_capable || !port->tcpc->cable_comm_capable(port->tcpc))
+ return false;
+ /*
+ * Power Delivery 2.0 Section 6.3.11
+ * Before communicating with a Cable Plug a Port Should ensure that it
+ * is the Vconn Source and that the Cable Plugs are powered by
+ * performing a Vconn swap if necessary. Since it cannot be guaranteed
+ * that the present Vconn Source is supplying Vconn, the only means to
+ * ensure that the Cable Plugs are powered is for a Port wishing to
+ * communicate with a Cable Plug is to become the Vconn Source.
+ *
+ * Power Delivery 3.0 Section 6.3.11
+ * Before communicating with a Cable Plug a Port Shall ensure that it
+ * is the Vconn source.
+ */
+ if (port->vconn_role != TYPEC_SOURCE)
+ return false;
+ /*
+ * Power Delivery 2.0 Section 2.4.4
+ * When no Contract or an Implicit Contract is in place the Source can
+ * communicate with a Cable Plug using SOP' packets in order to discover
+ * its characteristics.
+ *
+ * Power Delivery 3.0 Section 2.4.4
+ * When no Contract or an Implicit Contract is in place only the Source
+ * port that is supplying Vconn is allowed to send packets to a Cable
+ * Plug and is allowed to respond to packets from the Cable Plug.
+ */
+ if (!port->explicit_contract)
+ return port->pwr_role == TYPEC_SOURCE;
+ if (port->negotiated_rev == PD_REV30)
+ return true;
+ /*
+ * Power Delivery 2.0 Section 2.4.4
+ *
+ * When an Explicit Contract is in place the DFP (either the Source or
+ * the Sink) can communicate with the Cable Plug(s) using SOP’/SOP”
+ * Packets (see Figure 2-3).
+ */
+ if (port->negotiated_rev == PD_REV20)
+ return port->data_role == TYPEC_HOST;
+ return false;
+}
+
+static bool tcpm_attempt_vconn_swap_discovery(struct tcpm_port *port)
+{
+ if (!port->tcpc->attempt_vconn_swap_discovery)
+ return false;
+
+ /* Port is already source, no need to perform swap */
+ if (port->vconn_role == TYPEC_SOURCE)
+ return false;
+
+ /*
+ * Partner needs to support Alternate Modes with modal support. If
+ * partner is also capable of being a USB Host, it could be a device
+ * that supports Alternate Modes as the DFP.
+ */
+ if (!supports_modal(port) || supports_host(port))
+ return false;
+
+ if ((port->negotiated_rev == PD_REV20 && port->data_role == TYPEC_HOST) ||
+ port->negotiated_rev == PD_REV30)
+ return port->tcpc->attempt_vconn_swap_discovery(port->tcpc);
+
+ return false;
+}
+
+static bool tcpm_cable_vdm_supported(struct tcpm_port *port)
+{
+ return !IS_ERR_OR_NULL(port->cable) &&
+ typec_cable_is_active(port->cable) &&
+ supports_modal_cable(port) &&
+ tcpm_can_communicate_sop_prime(port);
+}
static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
const u32 *p, int cnt, u32 *response,
- enum adev_actions *adev_action)
+ enum adev_actions *adev_action,
+ enum tcpm_transmit_type rx_sop_type,
+ enum tcpm_transmit_type *response_tx_sop_type)
{
struct typec_port *typec = port->typec_port;
- struct typec_altmode *pdev;
- struct pd_mode_data *modep;
+ struct typec_altmode *pdev, *pdev_prime;
+ struct pd_mode_data *modep, *modep_prime;
int svdm_version;
int rlen = 0;
int cmd_type;
int cmd;
int i;
+ int ret;
cmd_type = PD_VDO_CMDT(p[0]);
cmd = PD_VDO_CMD(p[0]);
@@ -1622,17 +1884,54 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
p[0], cmd_type, cmd, cnt);
- modep = &port->mode_data;
-
- pdev = typec_match_altmode(port->partner_altmode, ALTMODE_DISCOVERY_MAX,
- PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
-
- svdm_version = typec_get_negotiated_svdm_version(typec);
- if (svdm_version < 0)
- return 0;
+ switch (rx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ modep_prime = &port->mode_data_prime;
+ pdev_prime = typec_match_altmode(port->plug_prime_altmode,
+ ALTMODE_DISCOVERY_MAX,
+ PD_VDO_VID(p[0]),
+ PD_VDO_OPOS(p[0]));
+ svdm_version = typec_get_cable_svdm_version(typec);
+ /*
+ * Update SVDM version if cable was discovered before port partner.
+ */
+ if (!IS_ERR_OR_NULL(port->cable) &&
+ PD_VDO_SVDM_VER(p[0]) < svdm_version)
+ typec_cable_set_svdm_version(port->cable, svdm_version);
+ break;
+ case TCPC_TX_SOP:
+ modep = &port->mode_data;
+ pdev = typec_match_altmode(port->partner_altmode,
+ ALTMODE_DISCOVERY_MAX,
+ PD_VDO_VID(p[0]),
+ PD_VDO_OPOS(p[0]));
+ svdm_version = typec_get_negotiated_svdm_version(typec);
+ if (svdm_version < 0)
+ return 0;
+ break;
+ default:
+ modep = &port->mode_data;
+ pdev = typec_match_altmode(port->partner_altmode,
+ ALTMODE_DISCOVERY_MAX,
+ PD_VDO_VID(p[0]),
+ PD_VDO_OPOS(p[0]));
+ svdm_version = typec_get_negotiated_svdm_version(typec);
+ if (svdm_version < 0)
+ return 0;
+ break;
+ }
switch (cmd_type) {
case CMDT_INIT:
+ /*
+ * Only the port or port partner is allowed to initialize SVDM
+ * commands over SOP'. In case the port partner initializes a
+ * sequence when it is not allowed to send SOP' messages, drop
+ * the message should the TCPM port try to process it.
+ */
+ if (rx_sop_type == TCPC_TX_SOP_PRIME)
+ return 0;
+
switch (cmd) {
case CMD_DISCOVER_IDENT:
if (PD_VDO_VID(p[0]) != USB_SID_PD)
@@ -1699,55 +1998,186 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
(VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
break;
case CMDT_RSP_ACK:
- /* silently drop message if we are not connected */
- if (IS_ERR_OR_NULL(port->partner))
+ /*
+ * Silently drop message if we are not connected, but can process
+ * if SOP' Discover Identity prior to explicit contract.
+ */
+ if (IS_ERR_OR_NULL(port->partner) &&
+ !(rx_sop_type == TCPC_TX_SOP_PRIME && cmd == CMD_DISCOVER_IDENT))
break;
tcpm_ams_finish(port);
switch (cmd) {
+ /*
+ * SVDM Command Flow for SOP and SOP':
+ * SOP Discover Identity
+ * SOP' Discover Identity
+ * SOP Discover SVIDs
+ * Discover Modes
+ * (Active Cables)
+ * SOP' Discover SVIDs
+ * Discover Modes
+ *
+ * Perform Discover SOP' if the port can communicate with cable
+ * plug.
+ */
case CMD_DISCOVER_IDENT:
- if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
- typec_partner_set_svdm_version(port->partner,
- PD_VDO_SVDM_VER(p[0]));
- /* 6.4.4.3.1 */
- svdm_consume_identity(port, p, cnt);
- response[0] = VDO(USB_SID_PD, 1, typec_get_negotiated_svdm_version(typec),
- CMD_DISCOVER_SVID);
- rlen = 1;
+ switch (rx_sop_type) {
+ case TCPC_TX_SOP:
+ if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
+ typec_partner_set_svdm_version(port->partner,
+ PD_VDO_SVDM_VER(p[0]));
+ /* If cable is discovered before partner, downgrade svdm */
+ if (!IS_ERR_OR_NULL(port->cable) &&
+ (typec_get_cable_svdm_version(port->typec_port) >
+ svdm_version))
+ typec_cable_set_svdm_version(port->cable,
+ svdm_version);
+ }
+ /* 6.4.4.3.1 */
+ svdm_consume_identity(port, p, cnt);
+ /* Attempt Vconn swap, delay SOP' discovery if necessary */
+ if (tcpm_attempt_vconn_swap_discovery(port)) {
+ port->send_discover_prime = true;
+ port->upcoming_state = VCONN_SWAP_SEND;
+ ret = tcpm_ams_start(port, VCONN_SWAP);
+ if (!ret)
+ return 0;
+ /* Cannot perform Vconn swap */
+ port->upcoming_state = INVALID_STATE;
+ port->send_discover_prime = false;
+ }
+
+ /*
+ * Attempt Discover Identity on SOP' if the
+ * cable was not discovered previously, and use
+ * the SVDM version of the partner to probe.
+ */
+ if (IS_ERR_OR_NULL(port->cable) &&
+ tcpm_can_communicate_sop_prime(port)) {
+ *response_tx_sop_type = TCPC_TX_SOP_PRIME;
+ port->send_discover_prime = true;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_negotiated_svdm_version(typec),
+ CMD_DISCOVER_IDENT);
+ rlen = 1;
+ } else {
+ *response_tx_sop_type = TCPC_TX_SOP;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_negotiated_svdm_version(typec),
+ CMD_DISCOVER_SVID);
+ rlen = 1;
+ }
+ break;
+ case TCPC_TX_SOP_PRIME:
+ /*
+ * svdm_consume_identity_sop_prime will determine
+ * the svdm_version for the cable moving forward.
+ */
+ svdm_consume_identity_sop_prime(port, p, cnt);
+
+ /*
+ * If received in SRC_VDM_IDENTITY_REQUEST, continue
+ * to SRC_SEND_CAPABILITIES
+ */
+ if (port->state == SRC_VDM_IDENTITY_REQUEST) {
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+ return 0;
+ }
+
+ *response_tx_sop_type = TCPC_TX_SOP;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_negotiated_svdm_version(typec),
+ CMD_DISCOVER_SVID);
+ rlen = 1;
+ break;
+ default:
+ return 0;
+ }
break;
case CMD_DISCOVER_SVID:
+ *response_tx_sop_type = rx_sop_type;
/* 6.4.4.3.2 */
- if (svdm_consume_svids(port, p, cnt)) {
+ if (svdm_consume_svids(port, p, cnt, rx_sop_type)) {
response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
rlen = 1;
- } else if (modep->nsvids && supports_modal(port)) {
- response[0] = VDO(modep->svids[0], 1, svdm_version,
- CMD_DISCOVER_MODES);
- rlen = 1;
+ } else {
+ if (rx_sop_type == TCPC_TX_SOP) {
+ if (modep->nsvids && supports_modal(port)) {
+ response[0] = VDO(modep->svids[0], 1, svdm_version,
+ CMD_DISCOVER_MODES);
+ rlen = 1;
+ }
+ } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
+ if (modep_prime->nsvids) {
+ response[0] = VDO(modep_prime->svids[0], 1,
+ svdm_version, CMD_DISCOVER_MODES);
+ rlen = 1;
+ }
+ }
}
break;
case CMD_DISCOVER_MODES:
- /* 6.4.4.3.3 */
- svdm_consume_modes(port, p, cnt);
- modep->svid_index++;
- if (modep->svid_index < modep->nsvids) {
- u16 svid = modep->svids[modep->svid_index];
- response[0] = VDO(svid, 1, svdm_version, CMD_DISCOVER_MODES);
- rlen = 1;
- } else {
- tcpm_register_partner_altmodes(port);
+ if (rx_sop_type == TCPC_TX_SOP) {
+ /* 6.4.4.3.3 */
+ svdm_consume_modes(port, p, cnt, rx_sop_type);
+ modep->svid_index++;
+ if (modep->svid_index < modep->nsvids) {
+ u16 svid = modep->svids[modep->svid_index];
+ *response_tx_sop_type = TCPC_TX_SOP;
+ response[0] = VDO(svid, 1, svdm_version,
+ CMD_DISCOVER_MODES);
+ rlen = 1;
+ } else if (tcpm_cable_vdm_supported(port)) {
+ *response_tx_sop_type = TCPC_TX_SOP_PRIME;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_cable_svdm_version(typec),
+ CMD_DISCOVER_SVID);
+ rlen = 1;
+ } else {
+ tcpm_register_partner_altmodes(port);
+ }
+ } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
+ /* 6.4.4.3.3 */
+ svdm_consume_modes(port, p, cnt, rx_sop_type);
+ modep_prime->svid_index++;
+ if (modep_prime->svid_index < modep_prime->nsvids) {
+ u16 svid = modep_prime->svids[modep_prime->svid_index];
+ *response_tx_sop_type = TCPC_TX_SOP_PRIME;
+ response[0] = VDO(svid, 1,
+ typec_get_cable_svdm_version(typec),
+ CMD_DISCOVER_MODES);
+ rlen = 1;
+ } else {
+ tcpm_register_plug_altmodes(port);
+ tcpm_register_partner_altmodes(port);
+ }
}
break;
case CMD_ENTER_MODE:
- if (adev && pdev)
- *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
+ *response_tx_sop_type = rx_sop_type;
+ if (rx_sop_type == TCPC_TX_SOP) {
+ if (adev && pdev) {
+ typec_altmode_update_active(pdev, true);
+ *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
+ }
+ } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
+ if (adev && pdev_prime) {
+ typec_altmode_update_active(pdev_prime, true);
+ *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
+ }
+ }
return 0;
case CMD_EXIT_MODE:
- if (adev && pdev) {
- /* Back to USB Operation */
- *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
- return 0;
+ *response_tx_sop_type = rx_sop_type;
+ if (rx_sop_type == TCPC_TX_SOP) {
+ if (adev && pdev) {
+ typec_altmode_update_active(pdev, false);
+ /* Back to USB Operation */
+ *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
+ return 0;
+ }
}
break;
case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
@@ -1800,13 +2230,15 @@ static void tcpm_pd_handle_msg(struct tcpm_port *port,
enum tcpm_ams ams);
static void tcpm_handle_vdm_request(struct tcpm_port *port,
- const __le32 *payload, int cnt)
+ const __le32 *payload, int cnt,
+ enum tcpm_transmit_type rx_sop_type)
{
enum adev_actions adev_action = ADEV_NONE;
struct typec_altmode *adev;
u32 p[PD_MAX_PAYLOAD];
u32 response[8] = { };
int i, rlen = 0;
+ enum tcpm_transmit_type response_tx_sop_type = TCPC_TX_SOP;
for (i = 0; i < cnt; i++)
p[i] = le32_to_cpu(payload[i]);
@@ -1841,7 +2273,8 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
* - We will send NAK and the flag will be cleared in the state machine.
*/
port->vdm_sm_running = true;
- rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
+ rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action,
+ rx_sop_type, &response_tx_sop_type);
} else {
if (port->negotiated_rev >= PD_REV30)
tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
@@ -1877,19 +2310,37 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
typec_altmode_vdm(adev, p[0], &p[1], cnt);
break;
case ADEV_QUEUE_VDM:
- typec_altmode_vdm(adev, p[0], &p[1], cnt);
+ if (response_tx_sop_type == TCPC_TX_SOP_PRIME)
+ typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P, p[0], &p[1], cnt);
+ else
+ typec_altmode_vdm(adev, p[0], &p[1], cnt);
break;
case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
- if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
- int svdm_version = typec_get_negotiated_svdm_version(
- port->typec_port);
- if (svdm_version < 0)
- break;
-
- response[0] = VDO(adev->svid, 1, svdm_version,
- CMD_EXIT_MODE);
- response[0] |= VDO_OPOS(adev->mode);
- rlen = 1;
+ if (response_tx_sop_type == TCPC_TX_SOP_PRIME) {
+ if (typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P,
+ p[0], &p[1], cnt)) {
+ int svdm_version = typec_get_cable_svdm_version(
+ port->typec_port);
+ if (svdm_version < 0)
+ break;
+
+ response[0] = VDO(adev->svid, 1, svdm_version,
+ CMD_EXIT_MODE);
+ response[0] |= VDO_OPOS(adev->mode);
+ rlen = 1;
+ }
+ } else {
+ if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
+ int svdm_version = typec_get_negotiated_svdm_version(
+ port->typec_port);
+ if (svdm_version < 0)
+ break;
+
+ response[0] = VDO(adev->svid, 1, svdm_version,
+ CMD_EXIT_MODE);
+ response[0] |= VDO_OPOS(adev->mode);
+ rlen = 1;
+ }
}
break;
case ADEV_ATTENTION:
@@ -1909,19 +2360,38 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
mutex_lock(&port->lock);
if (rlen > 0)
- tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
+ tcpm_queue_vdm(port, response[0], &response[1], rlen - 1, response_tx_sop_type);
else
port->vdm_sm_running = false;
}
static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
- const u32 *data, int count)
+ const u32 *data, int count, enum tcpm_transmit_type tx_sop_type)
{
- int svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
+ int svdm_version;
u32 header;
- if (svdm_version < 0)
- return;
+ switch (tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ /*
+ * If the port partner is discovered, then the port partner's
+ * SVDM Version will be returned
+ */
+ svdm_version = typec_get_cable_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ svdm_version = SVDM_VER_MAX;
+ break;
+ case TCPC_TX_SOP:
+ svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ return;
+ break;
+ default:
+ svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ return;
+ break;
+ }
if (WARN_ON(count > VDO_MAX_SIZE - 1))
count = VDO_MAX_SIZE - 1;
@@ -1930,7 +2400,7 @@ static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
svdm_version, cmd);
- tcpm_queue_vdm(port, header, data, count);
+ tcpm_queue_vdm(port, header, data, count, tx_sop_type);
}
static unsigned int vdm_ready_timeout(u32 vdm_hdr)
@@ -1964,6 +2434,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
struct pd_message msg;
int i, res = 0;
u32 vdo_hdr = port->vdo_data[0];
+ u32 response[8] = { };
switch (port->vdm_state) {
case VDM_STATE_READY:
@@ -1977,7 +2448,8 @@ static void vdm_run_state_machine(struct tcpm_port *port)
* if there's traffic or we're not in PDO ready state don't send
* a VDM.
*/
- if (port->state != SRC_READY && port->state != SNK_READY) {
+ if (port->state != SRC_READY && port->state != SNK_READY &&
+ port->state != SRC_VDM_IDENTITY_REQUEST) {
port->vdm_sm_running = false;
break;
}
@@ -1988,7 +2460,17 @@ static void vdm_run_state_machine(struct tcpm_port *port)
case CMD_DISCOVER_IDENT:
res = tcpm_ams_start(port, DISCOVER_IDENTITY);
if (res == 0) {
- port->send_discover = false;
+ switch (port->tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ port->send_discover_prime = false;
+ break;
+ case TCPC_TX_SOP:
+ port->send_discover = false;
+ break;
+ default:
+ port->send_discover = false;
+ break;
+ }
} else if (res == -EAGAIN) {
port->vdo_data[0] = 0;
mod_send_discover_delayed_work(port,
@@ -2044,12 +2526,21 @@ static void vdm_run_state_machine(struct tcpm_port *port)
break;
case VDM_STATE_ERR_SEND:
/*
+ * When sending Discover Identity to SOP' before establishing an
+ * explicit contract, do not retry. Instead, weave sending
+ * Source_Capabilities over SOP and Discover Identity over SOP'.
+ */
+ if (port->state == SRC_VDM_IDENTITY_REQUEST) {
+ tcpm_ams_finish(port);
+ port->vdm_state = VDM_STATE_DONE;
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+ /*
* A partner which does not support USB PD will not reply,
* so this is not a fatal error. At the same time, some
* devices may not return GoodCRC under some circumstances,
* so we need to retry.
*/
- if (port->vdm_retries < 3) {
+ } else if (port->vdm_retries < 3) {
tcpm_log(port, "VDM Tx error, retry");
port->vdm_retries++;
port->vdm_state = VDM_STATE_READY;
@@ -2057,19 +2548,59 @@ static void vdm_run_state_machine(struct tcpm_port *port)
tcpm_ams_finish(port);
} else {
tcpm_ams_finish(port);
+ if (port->tx_sop_type == TCPC_TX_SOP)
+ break;
+ /* Handle SOP' Transmission Errors */
+ switch (PD_VDO_CMD(vdo_hdr)) {
+ /*
+ * If Discover Identity fails on SOP', then resume
+ * discovery process on SOP only.
+ */
+ case CMD_DISCOVER_IDENT:
+ port->vdo_data[0] = 0;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_negotiated_svdm_version(
+ port->typec_port),
+ CMD_DISCOVER_SVID);
+ tcpm_queue_vdm(port, response[0], &response[1],
+ 0, TCPC_TX_SOP);
+ break;
+ /*
+ * If Discover SVIDs or Discover Modes fail, then
+ * proceed with Alt Mode discovery process on SOP.
+ */
+ case CMD_DISCOVER_SVID:
+ tcpm_register_partner_altmodes(port);
+ break;
+ case CMD_DISCOVER_MODES:
+ tcpm_register_partner_altmodes(port);
+ break;
+ default:
+ break;
+ }
}
break;
case VDM_STATE_SEND_MESSAGE:
/* Prepare and send VDM */
memset(&msg, 0, sizeof(msg));
- msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
- port->pwr_role,
- port->data_role,
- port->negotiated_rev,
- port->message_id, port->vdo_count);
+ if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
+ msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
+ 0, /* Cable Plug Indicator for DFP/UFP */
+ 0, /* Reserved */
+ port->negotiated_rev_prime,
+ port->message_id_prime,
+ port->vdo_count);
+ } else {
+ msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
+ port->pwr_role,
+ port->data_role,
+ port->negotiated_rev,
+ port->message_id,
+ port->vdo_count);
+ }
for (i = 0; i < port->vdo_count; i++)
msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
- res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+ res = tcpm_pd_transmit(port, port->tx_sop_type, &msg);
if (res < 0) {
port->vdm_state = VDM_STATE_ERR_SEND;
} else {
@@ -2244,7 +2775,7 @@ static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
header |= VDO_OPOS(altmode->mode);
- tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0);
+ tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP);
return 0;
}
@@ -2261,7 +2792,7 @@ static int tcpm_altmode_exit(struct typec_altmode *altmode)
header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
header |= VDO_OPOS(altmode->mode);
- tcpm_queue_vdm_unlocked(port, header, NULL, 0);
+ tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP);
return 0;
}
@@ -2270,7 +2801,7 @@ static int tcpm_altmode_vdm(struct typec_altmode *altmode,
{
struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
- tcpm_queue_vdm_unlocked(port, header, data, count - 1);
+ tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP);
return 0;
}
@@ -2281,6 +2812,58 @@ static const struct typec_altmode_ops tcpm_altmode_ops = {
.vdm = tcpm_altmode_vdm,
};
+
+static int tcpm_cable_altmode_enter(struct typec_altmode *altmode, enum typec_plug_index sop,
+ u32 *vdo)
+{
+ struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+ int svdm_version;
+ u32 header;
+
+ svdm_version = typec_get_cable_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
+ header |= VDO_OPOS(altmode->mode);
+
+ tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME);
+ return 0;
+}
+
+static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop)
+{
+ struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+ int svdm_version;
+ u32 header;
+
+ svdm_version = typec_get_cable_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
+ header |= VDO_OPOS(altmode->mode);
+
+ tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME);
+ return 0;
+}
+
+static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop,
+ u32 header, const u32 *data, int count)
+{
+ struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+
+ tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME);
+
+ return 0;
+}
+
+static const struct typec_cable_ops tcpm_cable_ops = {
+ .enter = tcpm_cable_altmode_enter,
+ .exit = tcpm_cable_altmode_exit,
+ .vdm = tcpm_cable_altmode_vdm,
+};
+
/*
* PD (data, control) command handling functions
*/
@@ -2293,7 +2876,8 @@ static inline enum tcpm_state ready_state(struct tcpm_port *port)
}
static int tcpm_pd_send_control(struct tcpm_port *port,
- enum pd_ctrl_msg_type type);
+ enum pd_ctrl_msg_type type,
+ enum tcpm_transmit_type tx_sop_type);
static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
int cnt)
@@ -2455,7 +3039,8 @@ static int tcpm_register_sink_caps(struct tcpm_port *port)
}
static void tcpm_pd_data_request(struct tcpm_port *port,
- const struct pd_message *msg)
+ const struct pd_message *msg,
+ enum tcpm_transmit_type rx_sop_type)
{
enum pd_data_msg_type type = pd_header_type_le(msg->header);
unsigned int cnt = pd_header_cnt_le(msg->header);
@@ -2496,8 +3081,11 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
break;
}
- if (rev < PD_MAX_REV)
+ if (rev < PD_MAX_REV) {
port->negotiated_rev = rev;
+ if (port->negotiated_rev_prime > port->negotiated_rev)
+ port->negotiated_rev_prime = port->negotiated_rev;
+ }
if (port->pwr_role == TYPEC_SOURCE) {
if (port->ams == GET_SOURCE_CAPABILITIES)
@@ -2548,8 +3136,11 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
break;
}
- if (rev < PD_MAX_REV)
+ if (rev < PD_MAX_REV) {
port->negotiated_rev = rev;
+ if (port->negotiated_rev_prime > port->negotiated_rev)
+ port->negotiated_rev_prime = port->negotiated_rev;
+ }
if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
tcpm_pd_handle_msg(port,
@@ -2605,7 +3196,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
NONE_AMS);
break;
case PD_DATA_VENDOR_DEF:
- tcpm_handle_vdm_request(port, msg->payload, cnt);
+ tcpm_handle_vdm_request(port, msg->payload, cnt, rx_sop_type);
break;
case PD_DATA_BIST:
port->bist_request = le32_to_cpu(msg->payload[0]);
@@ -2647,10 +3238,12 @@ static void tcpm_pps_complete(struct tcpm_port *port, int result)
}
static void tcpm_pd_ctrl_request(struct tcpm_port *port,
- const struct pd_message *msg)
+ const struct pd_message *msg,
+ enum tcpm_transmit_type rx_sop_type)
{
enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
enum tcpm_state next_state;
+ unsigned int rev = pd_header_rev_le(msg->header);
/*
* Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in
@@ -2815,6 +3408,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
case SOFT_RESET_SEND:
if (port->ams == SOFT_RESET_AMS)
tcpm_ams_finish(port);
+ /*
+ * SOP' Soft Reset is done after Vconn Swap,
+ * which returns to ready state
+ */
+ if (rx_sop_type == TCPC_TX_SOP_PRIME) {
+ if (rev < port->negotiated_rev_prime)
+ port->negotiated_rev_prime = rev;
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
+ }
if (port->pwr_role == TYPEC_SOURCE) {
port->upcoming_state = SRC_SEND_CAPABILITIES;
tcpm_ams_start(port, POWER_NEGOTIATION);
@@ -2981,6 +3584,7 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
const struct pd_message *msg = &event->msg;
unsigned int cnt = pd_header_cnt_le(msg->header);
struct tcpm_port *port = event->port;
+ enum tcpm_transmit_type rx_sop_type = event->rx_sop_type;
mutex_lock(&port->lock);
@@ -2992,6 +3596,14 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
unsigned int msgid = pd_header_msgid_le(msg->header);
/*
+ * Drop SOP' messages if cannot receive via
+ * tcpm_can_communicate_sop_prime
+ */
+ if (rx_sop_type == TCPC_TX_SOP_PRIME &&
+ !tcpm_can_communicate_sop_prime(port))
+ goto done;
+
+ /*
* USB PD standard, 6.6.1.2:
* "... if MessageID value in a received Message is the
* same as the stored value, the receiver shall return a
@@ -3000,16 +3612,26 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
* Message). Note: this shall not apply to the Soft_Reset
* Message which always has a MessageID value of zero."
*/
- if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
- goto done;
- port->rx_msgid = msgid;
+ switch (rx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ if (msgid == port->rx_msgid_prime)
+ goto done;
+ port->rx_msgid_prime = msgid;
+ break;
+ case TCPC_TX_SOP:
+ default:
+ if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
+ goto done;
+ port->rx_msgid = msgid;
+ break;
+ }
/*
* If both ends believe to be DFP/host, we have a data role
* mismatch.
*/
if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
- (port->data_role == TYPEC_HOST)) {
+ (port->data_role == TYPEC_HOST) && rx_sop_type == TCPC_TX_SOP) {
tcpm_log(port,
"Data role mismatch, initiating error recovery");
tcpm_set_state(port, ERROR_RECOVERY, 0);
@@ -3017,9 +3639,9 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
tcpm_pd_ext_msg_request(port, msg);
else if (cnt)
- tcpm_pd_data_request(port, msg);
+ tcpm_pd_data_request(port, msg, rx_sop_type);
else
- tcpm_pd_ctrl_request(port, msg);
+ tcpm_pd_ctrl_request(port, msg, rx_sop_type);
}
}
@@ -3028,7 +3650,8 @@ done:
kfree(event);
}
-void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
+void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg,
+ enum tcpm_transmit_type rx_sop_type)
{
struct pd_rx_event *event;
@@ -3038,23 +3661,47 @@ void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
kthread_init_work(&event->work, tcpm_pd_rx_handler);
event->port = port;
+ event->rx_sop_type = rx_sop_type;
memcpy(&event->msg, msg, sizeof(*msg));
kthread_queue_work(port->wq, &event->work);
}
EXPORT_SYMBOL_GPL(tcpm_pd_receive);
static int tcpm_pd_send_control(struct tcpm_port *port,
- enum pd_ctrl_msg_type type)
+ enum pd_ctrl_msg_type type,
+ enum tcpm_transmit_type tx_sop_type)
{
struct pd_message msg;
memset(&msg, 0, sizeof(msg));
- msg.header = PD_HEADER_LE(type, port->pwr_role,
- port->data_role,
- port->negotiated_rev,
- port->message_id, 0);
+ switch (tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ msg.header = PD_HEADER_LE(type,
+ 0, /* Cable Plug Indicator for DFP/UFP */
+ 0, /* Reserved */
+ port->negotiated_rev,
+ port->message_id_prime,
+ 0);
+ break;
+ case TCPC_TX_SOP:
+ msg.header = PD_HEADER_LE(type,
+ port->pwr_role,
+ port->data_role,
+ port->negotiated_rev,
+ port->message_id,
+ 0);
+ break;
+ default:
+ msg.header = PD_HEADER_LE(type,
+ port->pwr_role,
+ port->data_role,
+ port->negotiated_rev,
+ port->message_id,
+ 0);
+ break;
+ }
- return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+ return tcpm_pd_transmit(port, tx_sop_type, &msg);
}
/*
@@ -3073,13 +3720,13 @@ static bool tcpm_send_queued_message(struct tcpm_port *port)
switch (queued_message) {
case PD_MSG_CTRL_WAIT:
- tcpm_pd_send_control(port, PD_CTRL_WAIT);
+ tcpm_pd_send_control(port, PD_CTRL_WAIT, TCPC_TX_SOP);
break;
case PD_MSG_CTRL_REJECT:
- tcpm_pd_send_control(port, PD_CTRL_REJECT);
+ tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
break;
case PD_MSG_CTRL_NOT_SUPP:
- tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
+ tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
break;
case PD_MSG_DATA_SINK_CAP:
ret = tcpm_pd_send_sink_caps(port);
@@ -3649,6 +4296,7 @@ static int tcpm_src_attach(struct tcpm_port *port)
port->attached = true;
port->send_discover = true;
+ port->send_discover_prime = false;
return 0;
@@ -3665,6 +4313,15 @@ out_disable_mux:
static void tcpm_typec_disconnect(struct tcpm_port *port)
{
+ /*
+ * Unregister plug/cable outside of port->connected because cable can
+ * be discovered before SRC_READY/SNK_READY states where port->connected
+ * is set.
+ */
+ typec_unregister_plug(port->plug_prime);
+ typec_unregister_cable(port->cable);
+ port->plug_prime = NULL;
+ port->cable = NULL;
if (port->connected) {
typec_partner_set_usb_power_delivery(port->partner, NULL);
typec_unregister_partner(port->partner);
@@ -3676,14 +4333,20 @@ static void tcpm_typec_disconnect(struct tcpm_port *port)
static void tcpm_unregister_altmodes(struct tcpm_port *port)
{
struct pd_mode_data *modep = &port->mode_data;
+ struct pd_mode_data *modep_prime = &port->mode_data_prime;
int i;
for (i = 0; i < modep->altmodes; i++) {
typec_unregister_altmode(port->partner_altmode[i]);
port->partner_altmode[i] = NULL;
}
+ for (i = 0; i < modep_prime->altmodes; i++) {
+ typec_unregister_altmode(port->plug_prime_altmode[i]);
+ port->plug_prime_altmode[i] = NULL;
+ }
memset(modep, 0, sizeof(*modep));
+ memset(modep_prime, 0, sizeof(*modep_prime));
}
static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
@@ -3712,6 +4375,7 @@ static void tcpm_reset_port(struct tcpm_port *port)
* we can check tcpm_pd_rx_handler() if we had seen it before.
*/
port->rx_msgid = -1;
+ port->rx_msgid_prime = -1;
port->tcpc->set_pd_rx(port->tcpc, false);
tcpm_init_vbus(port); /* also disables charging */
@@ -3783,6 +4447,7 @@ static int tcpm_snk_attach(struct tcpm_port *port)
port->attached = true;
port->send_discover = true;
+ port->send_discover_prime = false;
return 0;
}
@@ -4023,8 +4688,11 @@ static void run_state_machine(struct tcpm_port *port)
port->pwr_opmode = TYPEC_PWR_MODE_USB;
port->caps_count = 0;
port->negotiated_rev = PD_MAX_REV;
+ port->negotiated_rev_prime = PD_MAX_REV;
port->message_id = 0;
+ port->message_id_prime = 0;
port->rx_msgid = -1;
+ port->rx_msgid_prime = -1;
port->explicit_contract = false;
/* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
if (port->ams == POWER_ROLE_SWAP ||
@@ -4045,8 +4713,12 @@ static void run_state_machine(struct tcpm_port *port)
}
ret = tcpm_pd_send_source_caps(port);
if (ret < 0) {
- tcpm_set_state(port, SRC_SEND_CAPABILITIES,
- PD_T_SEND_SOURCE_CAP);
+ if (tcpm_can_communicate_sop_prime(port) &&
+ IS_ERR_OR_NULL(port->cable))
+ tcpm_set_state(port, SRC_VDM_IDENTITY_REQUEST, 0);
+ else
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES,
+ PD_T_SEND_SOURCE_CAP);
} else {
/*
* Per standard, we should clear the reset counter here.
@@ -4087,7 +4759,7 @@ static void run_state_machine(struct tcpm_port *port)
case SRC_NEGOTIATE_CAPABILITIES:
ret = tcpm_pd_check_request(port);
if (ret < 0) {
- tcpm_pd_send_control(port, PD_CTRL_REJECT);
+ tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
if (!port->explicit_contract) {
tcpm_set_state(port,
SRC_WAIT_NEW_CAPABILITIES, 0);
@@ -4095,7 +4767,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, SRC_READY, 0);
}
} else {
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
tcpm_set_partner_usb_comm_capable(port,
!!(port->sink_request & RDO_USB_COMM));
tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
@@ -4104,7 +4776,7 @@ static void run_state_machine(struct tcpm_port *port)
break;
case SRC_TRANSITION_SUPPLY:
/* XXX: regulator_set_voltage(vbus, ...) */
- tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
+ tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
port->explicit_contract = true;
typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
port->pwr_opmode = TYPEC_PWR_MODE_PD;
@@ -4141,14 +4813,23 @@ static void run_state_machine(struct tcpm_port *port)
* 6.4.4.3.1 Discover Identity
* "The Discover Identity Command Shall only be sent to SOP when there is an
* Explicit Contract."
- * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
- * port->explicit_contract to decide whether to send the command.
+ *
+ * Discover Identity on SOP' should be discovered prior to the
+ * ready state, but if done after a Vconn Swap following Discover
+ * Identity on SOP then the discovery process can be run here
+ * as well.
*/
if (port->explicit_contract) {
- tcpm_set_initial_svdm_version(port);
+ if (port->send_discover_prime) {
+ port->tx_sop_type = TCPC_TX_SOP_PRIME;
+ } else {
+ port->tx_sop_type = TCPC_TX_SOP;
+ tcpm_set_initial_svdm_version(port);
+ }
mod_send_discover_delayed_work(port, 0);
} else {
port->send_discover = false;
+ port->send_discover_prime = false;
}
/*
@@ -4264,8 +4945,11 @@ static void run_state_machine(struct tcpm_port *port)
typec_set_pwr_opmode(port->typec_port, opmode);
port->pwr_opmode = TYPEC_PWR_MODE_USB;
port->negotiated_rev = PD_MAX_REV;
+ port->negotiated_rev_prime = PD_MAX_REV;
port->message_id = 0;
+ port->message_id_prime = 0;
port->rx_msgid = -1;
+ port->rx_msgid_prime = -1;
port->explicit_contract = false;
if (port->ams == POWER_ROLE_SWAP ||
@@ -4437,14 +5121,23 @@ static void run_state_machine(struct tcpm_port *port)
* 6.4.4.3.1 Discover Identity
* "The Discover Identity Command Shall only be sent to SOP when there is an
* Explicit Contract."
- * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
- * port->explicit_contract.
+ *
+ * Discover Identity on SOP' should be discovered prior to the
+ * ready state, but if done after a Vconn Swap following Discover
+ * Identity on SOP then the discovery process can be run here
+ * as well.
*/
if (port->explicit_contract) {
- tcpm_set_initial_svdm_version(port);
+ if (port->send_discover_prime) {
+ port->tx_sop_type = TCPC_TX_SOP_PRIME;
+ } else {
+ port->tx_sop_type = TCPC_TX_SOP;
+ tcpm_set_initial_svdm_version(port);
+ }
mod_send_discover_delayed_work(port, 0);
} else {
port->send_discover = false;
+ port->send_discover_prime = false;
}
power_supply_changed(port->psy);
@@ -4485,6 +5178,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_unregister_altmodes(port);
port->nr_sink_caps = 0;
port->send_discover = true;
+ port->send_discover_prime = false;
if (port->pwr_role == TYPEC_SOURCE)
tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
PD_T_PS_HARD_RESET);
@@ -4586,7 +5280,7 @@ static void run_state_machine(struct tcpm_port *port)
/* remove existing capabilities */
usb_power_delivery_unregister_capabilities(port->partner_source_caps);
port->partner_source_caps = NULL;
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
tcpm_ams_finish(port);
if (port->pwr_role == TYPEC_SOURCE) {
port->upcoming_state = SRC_SEND_CAPABILITIES;
@@ -4603,35 +5297,53 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_ams_start(port, SOFT_RESET_AMS);
break;
case SOFT_RESET_SEND:
- port->message_id = 0;
- port->rx_msgid = -1;
- /* remove existing capabilities */
- usb_power_delivery_unregister_capabilities(port->partner_source_caps);
- port->partner_source_caps = NULL;
- if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
- tcpm_set_state_cond(port, hard_reset_state(port), 0);
- else
- tcpm_set_state_cond(port, hard_reset_state(port),
- PD_T_SENDER_RESPONSE);
+ /*
+ * Power Delivery 3.0 Section 6.3.13
+ *
+ * A Soft_Reset Message Shall be targeted at a specific entity
+ * depending on the type of SOP* packet used.
+ */
+ if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
+ port->message_id_prime = 0;
+ port->rx_msgid_prime = -1;
+ tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP_PRIME);
+ tcpm_set_state_cond(port, ready_state(port), PD_T_SENDER_RESPONSE);
+ } else {
+ port->message_id = 0;
+ port->rx_msgid = -1;
+ /* remove existing capabilities */
+ usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+ port->partner_source_caps = NULL;
+ if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP))
+ tcpm_set_state_cond(port, hard_reset_state(port), 0);
+ else
+ tcpm_set_state_cond(port, hard_reset_state(port),
+ PD_T_SENDER_RESPONSE);
+ }
break;
/* DR_Swap states */
case DR_SWAP_SEND:
- tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
- if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
+ tcpm_pd_send_control(port, PD_CTRL_DR_SWAP, TCPC_TX_SOP);
+ if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
port->send_discover = true;
+ port->send_discover_prime = false;
+ }
tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
case DR_SWAP_ACCEPT:
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
- if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
+ if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
port->send_discover = true;
+ port->send_discover_prime = false;
+ }
tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
break;
case DR_SWAP_SEND_TIMEOUT:
tcpm_swap_complete(port, -ETIMEDOUT);
port->send_discover = false;
+ port->send_discover_prime = false;
tcpm_ams_finish(port);
tcpm_set_state(port, ready_state(port), 0);
break;
@@ -4648,7 +5360,7 @@ static void run_state_machine(struct tcpm_port *port)
break;
case FR_SWAP_SEND:
- if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP)) {
+ if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP, TCPC_TX_SOP)) {
tcpm_set_state(port, ERROR_RECOVERY, 0);
break;
}
@@ -4668,7 +5380,7 @@ static void run_state_machine(struct tcpm_port *port)
break;
case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
tcpm_set_pwr_role(port, TYPEC_SOURCE);
- if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
+ if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
tcpm_set_state(port, ERROR_RECOVERY, 0);
break;
}
@@ -4678,11 +5390,11 @@ static void run_state_machine(struct tcpm_port *port)
/* PR_Swap states */
case PR_SWAP_ACCEPT:
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
tcpm_set_state(port, PR_SWAP_START, 0);
break;
case PR_SWAP_SEND:
- tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
+ tcpm_pd_send_control(port, PD_CTRL_PR_SWAP, TCPC_TX_SOP);
tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
@@ -4724,7 +5436,7 @@ static void run_state_machine(struct tcpm_port *port)
* supply is turned off"
*/
tcpm_set_pwr_role(port, TYPEC_SINK);
- if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
+ if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
tcpm_set_state(port, ERROR_RECOVERY, 0);
break;
}
@@ -4771,17 +5483,17 @@ static void run_state_machine(struct tcpm_port *port)
* Source."
*/
tcpm_set_pwr_role(port, TYPEC_SOURCE);
- tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
+ tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
break;
case VCONN_SWAP_ACCEPT:
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
tcpm_ams_finish(port);
tcpm_set_state(port, VCONN_SWAP_START, 0);
break;
case VCONN_SWAP_SEND:
- tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
+ tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP, TCPC_TX_SOP);
tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
@@ -4800,14 +5512,34 @@ static void run_state_machine(struct tcpm_port *port)
PD_T_VCONN_SOURCE_ON);
break;
case VCONN_SWAP_TURN_ON_VCONN:
- tcpm_set_vconn(port, true);
- tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
- tcpm_set_state(port, ready_state(port), 0);
+ ret = tcpm_set_vconn(port, true);
+ tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
+ /*
+ * USB PD 3.0 Section 6.4.4.3.1
+ *
+ * Note that a Cable Plug or VPD will not be ready for PD
+ * Communication until tVCONNStable after VCONN has been applied
+ */
+ if (!ret)
+ tcpm_set_state(port, VCONN_SWAP_SEND_SOFT_RESET,
+ PD_T_VCONN_STABLE);
+ else
+ tcpm_set_state(port, ready_state(port), 0);
break;
case VCONN_SWAP_TURN_OFF_VCONN:
tcpm_set_vconn(port, false);
tcpm_set_state(port, ready_state(port), 0);
break;
+ case VCONN_SWAP_SEND_SOFT_RESET:
+ tcpm_swap_complete(port, port->swap_status);
+ if (tcpm_can_communicate_sop_prime(port)) {
+ port->tx_sop_type = TCPC_TX_SOP_PRIME;
+ port->upcoming_state = SOFT_RESET_SEND;
+ tcpm_ams_start(port, SOFT_RESET_AMS);
+ } else {
+ tcpm_set_state(port, ready_state(port), 0);
+ }
+ break;
case DR_SWAP_CANCEL:
case PR_SWAP_CANCEL:
@@ -4843,7 +5575,7 @@ static void run_state_machine(struct tcpm_port *port)
}
break;
case GET_STATUS_SEND:
- tcpm_pd_send_control(port, PD_CTRL_GET_STATUS);
+ tcpm_pd_send_control(port, PD_CTRL_GET_STATUS, TCPC_TX_SOP);
tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
@@ -4851,7 +5583,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, ready_state(port), 0);
break;
case GET_PPS_STATUS_SEND:
- tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS);
+ tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS, TCPC_TX_SOP);
tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
@@ -4859,7 +5591,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, ready_state(port), 0);
break;
case GET_SINK_CAP:
- tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP);
+ tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP, TCPC_TX_SOP);
tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
break;
case GET_SINK_CAP_TIMEOUT:
@@ -4902,9 +5634,18 @@ static void run_state_machine(struct tcpm_port *port)
/* Chunk state */
case CHUNK_NOT_SUPP:
- tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
+ tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
break;
+
+ /* Cable states */
+ case SRC_VDM_IDENTITY_REQUEST:
+ port->send_discover_prime = true;
+ port->tx_sop_type = TCPC_TX_SOP_PRIME;
+ mod_send_discover_delayed_work(port, 0);
+ port->upcoming_state = SRC_SEND_CAPABILITIES;
+ break;
+
default:
WARN(1, "Unexpected port state %d\n", port->state);
break;
@@ -5596,7 +6337,8 @@ static void tcpm_enable_frs_work(struct kthread_work *work)
goto unlock;
/* Send when the state machine is idle */
- if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover)
+ if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover ||
+ port->send_discover_prime)
goto resched;
port->upcoming_state = GET_SINK_CAP;
@@ -5619,21 +6361,23 @@ static void tcpm_send_discover_work(struct kthread_work *work)
mutex_lock(&port->lock);
/* No need to send DISCOVER_IDENTITY anymore */
- if (!port->send_discover)
+ if (!port->send_discover && !port->send_discover_prime)
goto unlock;
if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
port->send_discover = false;
+ port->send_discover_prime = false;
goto unlock;
}
/* Retry if the port is not idle */
- if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
+ if ((port->state != SRC_READY && port->state != SNK_READY &&
+ port->state != SRC_VDM_IDENTITY_REQUEST) || port->vdm_sm_running) {
mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
goto unlock;
}
- tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
+ tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0, port->tx_sop_type);
unlock:
mutex_unlock(&port->lock);
@@ -6860,6 +7604,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
typec_port_register_altmodes(port->typec_port,
&tcpm_altmode_ops, port,
port->port_altmode, ALTMODE_DISCOVERY_MAX);
+ typec_port_register_cable_ops(port->port_altmode, ARRAY_SIZE(port->port_altmode),
+ &tcpm_cable_ops);
port->registered = true;
mutex_lock(&port->lock);
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 87d4abde0ea2..cf719307b3f6 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -535,7 +535,7 @@ static irqreturn_t wcove_typec_irq(int irq, void *data)
goto err;
}
- tcpm_pd_receive(wcove->tcpm, &msg);
+ tcpm_pd_receive(wcove->tcpm, &msg, TCPC_TX_SOP);
ret = regmap_read(wcove->regmap, USBC_RXSTATUS,
&status);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 14f5a7bfae2e..cf52cb34d285 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -36,6 +36,19 @@
*/
#define UCSI_SWAP_TIMEOUT_MS 5000
+static int ucsi_read_message_in(struct ucsi *ucsi, void *buf,
+ size_t buf_size)
+{
+ /*
+ * Below UCSI 2.0, MESSAGE_IN was limited to 16 bytes. Truncate the
+ * reads here.
+ */
+ if (ucsi->version <= UCSI_VERSION_1_2)
+ buf_size = clamp(buf_size, 0, 16);
+
+ return ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, buf, buf_size);
+}
+
static int ucsi_acknowledge_command(struct ucsi *ucsi)
{
u64 ctrl;
@@ -72,7 +85,7 @@ static int ucsi_read_error(struct ucsi *ucsi)
if (ret < 0)
return ret;
- ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, &error, sizeof(error));
+ ret = ucsi_read_message_in(ucsi, &error, sizeof(error));
if (ret)
return ret;
@@ -170,7 +183,7 @@ int ucsi_send_command(struct ucsi *ucsi, u64 command,
length = ret;
if (data) {
- ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size);
+ ret = ucsi_read_message_in(ucsi, data, size);
if (ret)
goto out;
}
@@ -386,6 +399,27 @@ static int ucsi_register_altmode(struct ucsi_connector *con,
con->partner_altmode[i] = alt;
break;
+ case UCSI_RECIPIENT_SOP_P:
+ i = ucsi_next_altmode(con->plug_altmode);
+ if (i < 0) {
+ ret = i;
+ goto err;
+ }
+
+ ret = ucsi_altmode_next_mode(con->plug_altmode, desc->svid);
+ if (ret < 0)
+ return ret;
+
+ desc->mode = ret;
+
+ alt = typec_plug_register_altmode(con->plug, desc);
+ if (IS_ERR(alt)) {
+ ret = PTR_ERR(alt);
+ goto err;
+ }
+
+ con->plug_altmode[i] = alt;
+ break;
default:
return -EINVAL;
}
@@ -553,6 +587,9 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
case UCSI_RECIPIENT_SOP:
adev = con->partner_altmode;
break;
+ case UCSI_RECIPIENT_SOP_P:
+ adev = con->plug_altmode;
+ break;
default:
return;
}
@@ -633,6 +670,108 @@ static int ucsi_get_src_pdos(struct ucsi_connector *con)
return ret;
}
+static int ucsi_read_identity(struct ucsi_connector *con, u8 recipient,
+ u8 offset, u8 bytes, void *resp)
+{
+ struct ucsi *ucsi = con->ucsi;
+ u64 command;
+ int ret;
+
+ command = UCSI_COMMAND(UCSI_GET_PD_MESSAGE) |
+ UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_GET_PD_MESSAGE_RECIPIENT(recipient);
+ command |= UCSI_GET_PD_MESSAGE_OFFSET(offset);
+ command |= UCSI_GET_PD_MESSAGE_BYTES(bytes);
+ command |= UCSI_GET_PD_MESSAGE_TYPE(UCSI_GET_PD_MESSAGE_TYPE_IDENTITY);
+
+ ret = ucsi_send_command(ucsi, command, resp, bytes);
+ if (ret < 0)
+ dev_err(ucsi->dev, "UCSI_GET_PD_MESSAGE failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int ucsi_get_identity(struct ucsi_connector *con, u8 recipient,
+ struct usb_pd_identity *id)
+{
+ struct ucsi *ucsi = con->ucsi;
+ struct ucsi_pd_message_disc_id resp = {};
+ int ret;
+
+ if (ucsi->version < UCSI_VERSION_2_0) {
+ /*
+ * Before UCSI v2.0, MESSAGE_IN is 16 bytes which cannot fit
+ * the 28 byte identity response including the VDM header.
+ * First request the VDM header, ID Header VDO, Cert Stat VDO
+ * and Product VDO.
+ */
+ ret = ucsi_read_identity(con, recipient, 0, 0x10, &resp);
+ if (ret < 0)
+ return ret;
+
+ /* Then request Product Type VDO1 through Product Type VDO3. */
+ ret = ucsi_read_identity(con, recipient, 0x10, 0xc,
+ &resp.vdo[0]);
+ if (ret < 0)
+ return ret;
+
+ } else {
+ /*
+ * In UCSI v2.0 and after, MESSAGE_IN is large enough to request
+ * the full Discover Identity response at once.
+ */
+ ret = ucsi_read_identity(con, recipient, 0x0, 0x1c, &resp);
+ if (ret < 0)
+ return ret;
+ }
+
+ id->id_header = resp.id_header;
+ id->cert_stat = resp.cert_stat;
+ id->product = resp.product;
+ id->vdo[0] = resp.vdo[0];
+ id->vdo[1] = resp.vdo[1];
+ id->vdo[2] = resp.vdo[2];
+ return 0;
+}
+
+static int ucsi_get_partner_identity(struct ucsi_connector *con)
+{
+ int ret;
+
+ ret = ucsi_get_identity(con, UCSI_RECIPIENT_SOP,
+ &con->partner_identity);
+ if (ret < 0)
+ return ret;
+
+ ret = typec_partner_set_identity(con->partner);
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "Failed to set partner identity (%d)\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static int ucsi_get_cable_identity(struct ucsi_connector *con)
+{
+ int ret;
+
+ ret = ucsi_get_identity(con, UCSI_RECIPIENT_SOP_P,
+ &con->cable_identity);
+ if (ret < 0)
+ return ret;
+
+ ret = typec_cable_set_identity(con->cable);
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "Failed to set cable identity (%d)\n",
+ ret);
+ }
+
+ return ret;
+}
+
static int ucsi_check_altmodes(struct ucsi_connector *con)
{
int ret, num_partner_am;
@@ -721,6 +860,82 @@ static void ucsi_unregister_partner_pdos(struct ucsi_connector *con)
con->partner_pd = NULL;
}
+static int ucsi_register_plug(struct ucsi_connector *con)
+{
+ struct typec_plug *plug;
+ struct typec_plug_desc desc = {.index = TYPEC_PLUG_SOP_P};
+
+ plug = typec_register_plug(con->cable, &desc);
+ if (IS_ERR(plug)) {
+ dev_err(con->ucsi->dev,
+ "con%d: failed to register plug (%ld)\n", con->num,
+ PTR_ERR(plug));
+ return PTR_ERR(plug);
+ }
+
+ con->plug = plug;
+ return 0;
+}
+
+static void ucsi_unregister_plug(struct ucsi_connector *con)
+{
+ if (!con->plug)
+ return;
+
+ ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP_P);
+ typec_unregister_plug(con->plug);
+ con->plug = NULL;
+}
+
+static int ucsi_register_cable(struct ucsi_connector *con)
+{
+ struct typec_cable *cable;
+ struct typec_cable_desc desc = {};
+
+ switch (UCSI_CABLE_PROP_FLAG_PLUG_TYPE(con->cable_prop.flags)) {
+ case UCSI_CABLE_PROPERTY_PLUG_TYPE_A:
+ desc.type = USB_PLUG_TYPE_A;
+ break;
+ case UCSI_CABLE_PROPERTY_PLUG_TYPE_B:
+ desc.type = USB_PLUG_TYPE_B;
+ break;
+ case UCSI_CABLE_PROPERTY_PLUG_TYPE_C:
+ desc.type = USB_PLUG_TYPE_C;
+ break;
+ default:
+ desc.type = USB_PLUG_NONE;
+ break;
+ }
+
+ desc.identity = &con->cable_identity;
+ desc.active = !!(UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE &
+ con->cable_prop.flags);
+ desc.pd_revision = UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV_AS_BCD(
+ con->cable_prop.flags);
+
+ cable = typec_register_cable(con->port, &desc);
+ if (IS_ERR(cable)) {
+ dev_err(con->ucsi->dev,
+ "con%d: failed to register cable (%ld)\n", con->num,
+ PTR_ERR(cable));
+ return PTR_ERR(cable);
+ }
+
+ con->cable = cable;
+ return 0;
+}
+
+static void ucsi_unregister_cable(struct ucsi_connector *con)
+{
+ if (!con->cable)
+ return;
+
+ ucsi_unregister_plug(con);
+ typec_unregister_cable(con->cable);
+ memset(&con->cable_identity, 0, sizeof(con->cable_identity));
+ con->cable = NULL;
+}
+
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
@@ -768,7 +983,9 @@ static int ucsi_register_partner(struct ucsi_connector *con)
break;
}
+ desc.identity = &con->partner_identity;
desc.usb_pd = pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD;
+ desc.pd_revision = UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(con->cap.flags);
partner = typec_register_partner(con->port, &desc);
if (IS_ERR(partner)) {
@@ -793,7 +1010,9 @@ static void ucsi_unregister_partner(struct ucsi_connector *con)
typec_partner_set_usb_power_delivery(con->partner, NULL);
ucsi_unregister_partner_pdos(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP);
+ ucsi_unregister_cable(con);
typec_unregister_partner(con->partner);
+ memset(&con->partner_identity, 0, sizeof(con->partner_identity));
con->partner = NULL;
}
@@ -843,6 +1062,27 @@ static void ucsi_partner_change(struct ucsi_connector *con)
con->num, u_role);
}
+static int ucsi_check_connector_capability(struct ucsi_connector *con)
+{
+ u64 command;
+ int ret;
+
+ if (!con->partner || con->ucsi->version < UCSI_VERSION_2_0)
+ return 0;
+
+ command = UCSI_GET_CONNECTOR_CAPABILITY | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_send_command(con->ucsi, command, &con->cap, sizeof(con->cap));
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "GET_CONNECTOR_CAPABILITY failed (%d)\n", ret);
+ return ret;
+ }
+
+ typec_partner_set_pd_revision(con->partner,
+ UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(con->cap.flags));
+
+ return ret;
+}
+
static int ucsi_check_connection(struct ucsi_connector *con)
{
u8 prev_flags = con->status.flags;
@@ -872,6 +1112,42 @@ static int ucsi_check_connection(struct ucsi_connector *con)
return 0;
}
+static int ucsi_check_cable(struct ucsi_connector *con)
+{
+ u64 command;
+ int ret;
+
+ if (con->cable)
+ return 0;
+
+ command = UCSI_GET_CABLE_PROPERTY | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_send_command(con->ucsi, command, &con->cable_prop,
+ sizeof(con->cable_prop));
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "GET_CABLE_PROPERTY failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = ucsi_register_cable(con);
+ if (ret < 0)
+ return ret;
+
+ ret = ucsi_get_cable_identity(con);
+ if (ret < 0)
+ return ret;
+
+ ret = ucsi_register_plug(con);
+ if (ret < 0)
+ return ret;
+
+ ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP_P);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static void ucsi_handle_connector_change(struct work_struct *work)
{
struct ucsi_connector *con = container_of(work, struct ucsi_connector,
@@ -912,6 +1188,9 @@ static void ucsi_handle_connector_change(struct work_struct *work)
if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
ucsi_register_partner(con);
ucsi_partner_task(con, ucsi_check_connection, 1, HZ);
+ ucsi_partner_task(con, ucsi_check_connector_capability, 1, HZ);
+ ucsi_partner_task(con, ucsi_get_partner_identity, 1, HZ);
+ ucsi_partner_task(con, ucsi_check_cable, 1, HZ);
if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
UCSI_CONSTAT_PWR_OPMODE_PD)
@@ -1310,6 +1589,8 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
ucsi_register_partner(con);
ucsi_pwr_opmode_change(con);
ucsi_port_psy_changed(con);
+ ucsi_get_partner_identity(con);
+ ucsi_check_cable(con);
}
/* Only notify USB controller if partner supports USB data */
@@ -1558,6 +1839,15 @@ int ucsi_register(struct ucsi *ucsi)
if (!ucsi->version)
return -ENODEV;
+ /*
+ * Version format is JJ.M.N (JJ = Major version, M = Minor version,
+ * N = sub-minor version).
+ */
+ dev_dbg(ucsi->dev, "Registered UCSI interface with version %x.%x.%x",
+ UCSI_BCD_GET_MAJOR(ucsi->version),
+ UCSI_BCD_GET_MINOR(ucsi->version),
+ UCSI_BCD_GET_SUBMINOR(ucsi->version));
+
queue_delayed_work(system_long_wq, &ucsi->work, 0);
ucsi_debugfs_register(ucsi);
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 6478016d5cb8..32daf5f58650 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -10,6 +10,7 @@
#include <linux/usb/typec.h>
#include <linux/usb/pd.h>
#include <linux/usb/role.h>
+#include <asm/unaligned.h>
/* -------------------------------------------------------------------------- */
@@ -23,6 +24,23 @@ struct dentry;
#define UCSI_CONTROL 8
#define UCSI_MESSAGE_IN 16
#define UCSI_MESSAGE_OUT 32
+#define UCSIv2_MESSAGE_OUT 272
+
+/* UCSI versions */
+#define UCSI_VERSION_1_2 0x0120
+#define UCSI_VERSION_2_0 0x0200
+#define UCSI_VERSION_2_1 0x0210
+#define UCSI_VERSION_3_0 0x0300
+
+#define UCSI_BCD_GET_MAJOR(_v_) (((_v_) >> 8) & 0xFF)
+#define UCSI_BCD_GET_MINOR(_v_) (((_v_) >> 4) & 0x0F)
+#define UCSI_BCD_GET_SUBMINOR(_v_) ((_v_) & 0x0F)
+
+/*
+ * Per USB PD 3.2, Section 6.2.1.1.5, the spec revision is represented by 2 bits:
+ * 0b00 = 1.0, 0b01 = 2.0, 0b10 = 3.0, 0b11 = Reserved (shall not be used).
+ */
+#define UCSI_SPEC_REVISION_TO_BCD(_v_) (((_v_) + 1) << 8)
/* Command Status and Connector Change Indication (CCI) bits */
#define UCSI_CCI_CONNECTOR(_c_) (((_c_) & GENMASK(7, 1)) >> 1)
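To see what the new version macros produce, here is a small standalone sketch, assuming a UCSI 2.1 PPM and a PD revision field of 0b10; only the macro bodies are taken from the hunk above, the rest is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Redefined locally so the sketch builds outside the kernel tree. */
#define UCSI_BCD_GET_MAJOR(v)		(((v) >> 8) & 0xFF)
#define UCSI_BCD_GET_MINOR(v)		(((v) >> 4) & 0x0F)
#define UCSI_BCD_GET_SUBMINOR(v)	((v) & 0x0F)
#define UCSI_SPEC_REVISION_TO_BCD(v)	(((v) + 1) << 8)

int main(void)
{
	uint16_t version = 0x0210;	/* UCSI 2.1 */

	printf("UCSI %x.%x.%x\n", UCSI_BCD_GET_MAJOR(version),
	       UCSI_BCD_GET_MINOR(version), UCSI_BCD_GET_SUBMINOR(version));
	/* PD revision field 0b10 maps to BCD 0x0300, i.e. PD 3.0. */
	printf("PD revision (BCD): 0x%04x\n", UCSI_SPEC_REVISION_TO_BCD(2));
	return 0;
}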
@@ -88,6 +106,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_CABLE_PROPERTY 0x11
#define UCSI_GET_CONNECTOR_STATUS 0x12
#define UCSI_GET_ERROR_STATUS 0x13
+#define UCSI_GET_PD_MESSAGE 0x15
#define UCSI_CONNECTOR_NUMBER(_num_) ((u64)(_num_) << 16)
#define UCSI_COMMAND(_cmd_) ((_cmd_) & 0xff)
@@ -141,6 +160,18 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_MAX_PDOS (4)
#define UCSI_GET_PDOS_SRC_PDOS ((u64)1 << 34)
+/* GET_PD_MESSAGE command bits */
+#define UCSI_GET_PD_MESSAGE_RECIPIENT(_r_) ((u64)(_r_) << 23)
+#define UCSI_GET_PD_MESSAGE_OFFSET(_r_) ((u64)(_r_) << 26)
+#define UCSI_GET_PD_MESSAGE_BYTES(_r_) ((u64)(_r_) << 34)
+#define UCSI_GET_PD_MESSAGE_TYPE(_r_) ((u64)(_r_) << 42)
+#define UCSI_GET_PD_MESSAGE_TYPE_SNK_CAP_EXT 0
+#define UCSI_GET_PD_MESSAGE_TYPE_SRC_CAP_EXT 1
+#define UCSI_GET_PD_MESSAGE_TYPE_BAT_CAP 2
+#define UCSI_GET_PD_MESSAGE_TYPE_BAT_STAT 3
+#define UCSI_GET_PD_MESSAGE_TYPE_IDENTITY 4
+#define UCSI_GET_PD_MESSAGE_TYPE_REVISION 5
+
/* -------------------------------------------------------------------------- */
/* Error information returned by PPM in response to GET_ERROR_STATUS command. */
@@ -203,9 +234,29 @@ struct ucsi_connector_capability {
#define UCSI_CONCAP_OPMODE_USB2 BIT(5)
#define UCSI_CONCAP_OPMODE_USB3 BIT(6)
#define UCSI_CONCAP_OPMODE_ALT_MODE BIT(7)
- u8 flags;
+ u32 flags;
#define UCSI_CONCAP_FLAG_PROVIDER BIT(0)
#define UCSI_CONCAP_FLAG_CONSUMER BIT(1)
+#define UCSI_CONCAP_FLAG_SWAP_TO_DFP BIT(2)
+#define UCSI_CONCAP_FLAG_SWAP_TO_UFP BIT(3)
+#define UCSI_CONCAP_FLAG_SWAP_TO_SRC BIT(4)
+#define UCSI_CONCAP_FLAG_SWAP_TO_SINK BIT(5)
+#define UCSI_CONCAP_FLAG_EX_OP_MODE(_f_) \
+ (((_f_) & GENMASK(13, 6)) >> 6)
+#define UCSI_CONCAP_EX_OP_MODE_USB4_GEN2 BIT(0)
+#define UCSI_CONCAP_EX_OP_MODE_EPR_SRC BIT(1)
+#define UCSI_CONCAP_EX_OP_MODE_EPR_SINK BIT(2)
+#define UCSI_CONCAP_EX_OP_MODE_USB4_GEN3 BIT(3)
+#define UCSI_CONCAP_EX_OP_MODE_USB4_GEN4 BIT(4)
+#define UCSI_CONCAP_FLAG_MISC_CAPS(_f_) \
+ (((_f_) & GENMASK(17, 14)) >> 14)
+#define UCSI_CONCAP_MISC_CAP_FW_UPDATE BIT(0)
+#define UCSI_CONCAP_MISC_CAP_SECURITY BIT(1)
+#define UCSI_CONCAP_FLAG_REV_CURR_PROT_SUPPORT BIT(18)
+#define UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV(_f_) \
+ (((_f_) & GENMASK(20, 19)) >> 19)
+#define UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(_f_) \
+ UCSI_SPEC_REVISION_TO_BCD(UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV(_f_))
} __packed;
struct ucsi_altmode {
@@ -221,12 +272,15 @@ struct ucsi_cable_property {
#define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE BIT(0)
#define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE BIT(1)
#define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY BIT(2)
-#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) ((_f_) & GENMASK(3, 0))
+#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) (((_f_) & GENMASK(4, 3)) >> 3)
#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
-#define UCSI_CABLE_PROP_MODE_SUPPORT BIT(5)
+#define UCSI_CABLE_PROP_FLAG_MODE_SUPPORT BIT(5)
+#define UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV(_f_) (((_f_) & GENMASK(7, 6)) >> 6)
+#define UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV_AS_BCD(_f_) \
+ UCSI_SPEC_REVISION_TO_BCD(UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV(_f_))
u8 latency;
} __packed;
@@ -265,15 +319,48 @@ struct ucsi_connector_status {
#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG 5
#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6
u32 request_data_obj;
- u8 pwr_status;
-#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_) & GENMASK(2, 0))
+
+ u8 pwr_status[3];
+#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_[0]) & GENMASK(1, 0))
#define UCSI_CONSTAT_BC_NOT_CHARGING 0
#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1
#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
-#define UCSI_CONSTAT_PROVIDER_CAP_LIMIT(_p_) (((_p_) & GENMASK(6, 3)) >> 3)
+#define UCSI_CONSTAT_PROVIDER_CAP_LIMIT(_p_) (((_p_[0]) & GENMASK(5, 2)) >> 2)
#define UCSI_CONSTAT_CAP_PWR_LOWERED 0
#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1
+#define UCSI_CONSTAT_PROVIDER_PD_VERSION_OPER_MODE(_p_) \
+ ((get_unaligned_le32(_p_) & GENMASK(21, 6)) >> 6)
+#define UCSI_CONSTAT_ORIENTATION(_p_) (((_p_[2]) & GENMASK(6, 6)) >> 6)
+#define UCSI_CONSTAT_ORIENTATION_DIRECT 0
+#define UCSI_CONSTAT_ORIENTATION_FLIPPED 1
+#define UCSI_CONSTAT_SINK_PATH_STATUS(_p_) (((_p_[2]) & GENMASK(7, 7)) >> 7)
+#define UCSI_CONSTAT_SINK_PATH_DISABLED 0
+#define UCSI_CONSTAT_SINK_PATH_ENABLED 1
+ u8 pwr_readings[9];
+#define UCSI_CONSTAT_REV_CURR_PROT_STATUS(_p_) ((_p_[0]) & 0x1)
+#define UCSI_CONSTAT_PWR_READING_VALID(_p_) (((_p_[0]) & GENMASK(1, 1)) >> 1)
+#define UCSI_CONSTAT_CURRENT_SCALE(_p_) (((_p_[0]) & GENMASK(4, 2)) >> 2)
+#define UCSI_CONSTAT_PEAK_CURRENT(_p_) \
+ ((get_unaligned_le32(_p_) & GENMASK(20, 5)) >> 5)
+#define UCSI_CONSTAT_AVG_CURRENT(_p_) \
+ ((get_unaligned_le32(&(_p_)[2]) & GENMASK(20, 5)) >> 5)
+#define UCSI_CONSTAT_VOLTAGE_SCALE(_p_) \
+ ((get_unaligned_le16(&(_p_)[4]) & GENMASK(8, 5)) >> 5)
+#define UCSI_CONSTAT_VOLTAGE_READING(_p_) \
+ ((get_unaligned_le32(&(_p_)[5]) & GENMASK(16, 1)) >> 1)
+} __packed;
+
+/*
+ * Data structure filled by PPM in response to GET_PD_MESSAGE command with the
+ * Response Message Type set to Discover Identity Response.
+ */
+struct ucsi_pd_message_disc_id {
+ u32 vdm_header;
+ u32 id_header;
+ u32 cert_stat;
+ u32 product;
+ u32 vdo[3];
} __packed;
/* -------------------------------------------------------------------------- */
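The power readings added to the connector status are unaligned little-endian bitfields; the following standalone sketch shows the extraction that UCSI_CONSTAT_PEAK_CURRENT() performs, with a portable helper standing in for get_unaligned_le32() and made-up sample bytes.

#include <stdint.h>
#include <stdio.h>

/* Portable little-endian 32-bit read, standing in for get_unaligned_le32(). */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Peak current occupies bits 20:5 of the first pwr_readings bytes (GENMASK(20, 5)). */
#define PEAK_CURRENT(p)	((get_le32(p) & 0x001fffe0u) >> 5)

int main(void)
{
	uint8_t pwr_readings[9] = { 0x42, 0x03, 0x10, 0x00 };	/* made-up sample */

	printf("peak current field: %u\n", (unsigned)PEAK_CURRENT(pwr_readings));
	return 0;
}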
@@ -341,14 +428,18 @@ struct ucsi_connector {
struct typec_port *port;
struct typec_partner *partner;
+ struct typec_cable *cable;
+ struct typec_plug *plug;
struct typec_altmode *port_altmode[UCSI_MAX_ALTMODES];
struct typec_altmode *partner_altmode[UCSI_MAX_ALTMODES];
+ struct typec_altmode *plug_altmode[UCSI_MAX_ALTMODES];
struct typec_capability typec_cap;
struct ucsi_connector_status status;
struct ucsi_connector_capability cap;
+ struct ucsi_cable_property cable_prop;
struct power_supply *psy;
struct power_supply_desc psy_desc;
u32 rdo;
@@ -364,6 +455,10 @@ struct ucsi_connector {
struct usb_power_delivery_capabilities *partner_sink_caps;
struct usb_role_switch *usb_role_sw;
+
+ /* USB PD identity */
+ struct usb_pd_identity partner_identity;
+ struct usb_pd_identity cable_identity;
};
int ucsi_send_command(struct ucsi *ucsi, u64 command,
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 449c125f6f87..dda7c7c94e08 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -192,6 +192,12 @@ struct ucsi_ccg_altmode {
bool checked;
} __packed;
+#define CCGX_MESSAGE_IN_MAX 4
+struct op_region {
+ __le32 cci;
+ __le32 message_in[CCGX_MESSAGE_IN_MAX];
+};
+
struct ucsi_ccg {
struct device *dev;
struct ucsi *ucsi;
@@ -222,6 +228,13 @@ struct ucsi_ccg {
bool has_multiple_dp;
struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
+
+ /*
+	 * This spinlock protects op_data, which holds the CCI and MESSAGE_IN
+	 * values updated by the ISR.
+ */
+ spinlock_t op_lock;
+ struct op_region op_data;
};
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
@@ -305,12 +318,42 @@ static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len)
return 0;
}
+static int ccg_op_region_update(struct ucsi_ccg *uc, u32 cci)
+{
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_MESSAGE_IN);
+ struct op_region *data = &uc->op_data;
+ unsigned char *buf;
+ size_t size = sizeof(data->message_in);
+
+ buf = kzalloc(size, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+ if (UCSI_CCI_LENGTH(cci)) {
+ int ret = ccg_read(uc, reg, (void *)buf, size);
+
+ if (ret) {
+ kfree(buf);
+ return ret;
+ }
+ }
+
+ spin_lock(&uc->op_lock);
+ data->cci = cpu_to_le32(cci);
+ if (UCSI_CCI_LENGTH(cci))
+ memcpy(&data->message_in, buf, size);
+ spin_unlock(&uc->op_lock);
+ kfree(buf);
+ return 0;
+}
+
static int ucsi_ccg_init(struct ucsi_ccg *uc)
{
unsigned int count = 10;
u8 data;
int status;
+ spin_lock_init(&uc->op_lock);
+
data = CCGX_RAB_UCSI_CONTROL_STOP;
status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
if (status < 0)
@@ -520,9 +563,20 @@ static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
struct ucsi_capability *cap;
struct ucsi_altmode *alt;
- int ret;
+ int ret = 0;
+
+ if (offset == UCSI_CCI) {
+ spin_lock(&uc->op_lock);
+ memcpy(val, &(uc->op_data).cci, val_len);
+ spin_unlock(&uc->op_lock);
+ } else if (offset == UCSI_MESSAGE_IN) {
+ spin_lock(&uc->op_lock);
+ memcpy(val, &(uc->op_data).message_in, val_len);
+ spin_unlock(&uc->op_lock);
+ } else {
+ ret = ccg_read(uc, reg, val, val_len);
+ }
- ret = ccg_read(uc, reg, val, val_len);
if (ret)
return ret;
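The CCI/MESSAGE_IN handling above follows a simple caching pattern: the interrupt path fills an in-memory op region under a lock, and the read path copies from it instead of going back to the I2C bus. Below is a minimal sketch of that pattern, with a pthread mutex standing in for the driver's spinlock; the structure layout and names are simplified assumptions, not the driver's API.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct op_cache {
	pthread_mutex_t lock;
	uint32_t cci;
	uint8_t message_in[16];
};

/* Interrupt path: publish CCI and MESSAGE_IN after reading them from hardware. */
static void op_cache_update(struct op_cache *c, uint32_t cci,
			    const uint8_t *msg, size_t len)
{
	pthread_mutex_lock(&c->lock);
	c->cci = cci;
	if (len > sizeof(c->message_in))
		len = sizeof(c->message_in);
	memcpy(c->message_in, msg, len);
	pthread_mutex_unlock(&c->lock);
}

/* Read path: serve CCI from the cache without touching the bus. */
static uint32_t op_cache_read_cci(struct op_cache *c)
{
	uint32_t cci;

	pthread_mutex_lock(&c->lock);
	cci = c->cci;
	pthread_mutex_unlock(&c->lock);
	return cci;
}

int main(void)
{
	struct op_cache c = { .lock = PTHREAD_MUTEX_INITIALIZER };
	uint8_t msg[4] = { 0xde, 0xad, 0xbe, 0xef };

	op_cache_update(&c, 0x80000000u, msg, sizeof(msg));
	printf("cached CCI: 0x%08x\n", (unsigned)op_cache_read_cci(&c));
	return 0;
}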
@@ -559,9 +613,18 @@ static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
const void *val, size_t val_len)
{
+ struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
- return ccg_write(ucsi_get_drvdata(ucsi), reg, val, val_len);
+ /*
+	 * The UCSI core may read CCI immediately after an async_write, so
+	 * clear the cached CCI here to keep the caller from seeing stale data
+	 * before the ISR delivers the new value.
+ */
+ spin_lock(&uc->op_lock);
+ uc->op_data.cci = 0;
+ spin_unlock(&uc->op_lock);
+
+ return ccg_write(uc, reg, val, val_len);
}
static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
@@ -615,13 +678,18 @@ static irqreturn_t ccg_irq_handler(int irq, void *data)
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI);
struct ucsi_ccg *uc = data;
u8 intr_reg;
- u32 cci;
- int ret;
+ u32 cci = 0;
+ int ret = 0;
ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
if (ret)
return ret;
+ if (!intr_reg)
+ return IRQ_HANDLED;
+ else if (!(intr_reg & UCSI_READ_INT))
+ goto err_clear_irq;
+
ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci));
if (ret)
goto err_clear_irq;
@@ -629,13 +697,21 @@ static irqreturn_t ccg_irq_handler(int irq, void *data)
if (UCSI_CCI_CONNECTOR(cci))
ucsi_connector_change(uc->ucsi, UCSI_CCI_CONNECTOR(cci));
- if (test_bit(DEV_CMD_PENDING, &uc->flags) &&
- cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
- complete(&uc->complete);
+ /*
+ * As per CCGx UCSI interface guide, copy CCI and MESSAGE_IN
+	 * to the OpRegion before clearing the UCSI interrupt.
+ */
+ ret = ccg_op_region_update(uc, cci);
+ if (ret)
+ goto err_clear_irq;
err_clear_irq:
ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
+ if (!ret && test_bit(DEV_CMD_PENDING, &uc->flags) &&
+ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ complete(&uc->complete);
+
return IRQ_HANDLED;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index faccc942b381..932e7bf69447 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -298,6 +298,7 @@ static void pmic_glink_ucsi_destroy(void *data)
}
static const struct of_device_id pmic_glink_ucsi_of_quirks[] = {
+ { .compatible = "qcom,qcm6490-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{ .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{ .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{ .compatible = "qcom,sm8350-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },