author    Linus Torvalds <torvalds@linux-foundation.org>  2017-11-16 02:01:28 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-11-16 02:01:28 +0300
commit    1b6115fbe3b3db746d7baa11399dd617fc75e1c4 (patch)
tree      d84d690504f0872eb8f5a1b18ed251236872fae5 /drivers/pci
parent    ad0835a93008e5901415a0a27847d6a27649aa3a (diff)
parent    91f3140fdef61074515470243370d92a45f0b516 (diff)
download  linux-1b6115fbe3b3db746d7baa11399dd617fc75e1c4.tar.xz
Merge tag 'pci-v4.15-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
Pull PCI updates from Bjorn Helgaas:

 - detach driver before tearing down procfs/sysfs (Alex Williamson)
 - disable PCIe services during shutdown (Sinan Kaya)
 - fix ASPM oops on systems with no Root Ports (Ard Biesheuvel)
 - fix ASPM LTR_L1.2_THRESHOLD programming (Bjorn Helgaas)
 - fix ASPM Common_Mode_Restore_Time computation (Bjorn Helgaas)
 - fix portdrv MSI/MSI-X vector allocation (Dongdong Liu, Bjorn Helgaas)
 - report non-fatal AER errors only to the affected endpoint (Gabriele Paoloni)
 - distribute bus numbers, MMIO, and I/O space among hotplug bridges to allow more devices to be hot-added (Mika Westerberg)
 - fix pciehp races during initialization and surprise link down (Mika Westerberg)
 - handle surprise-removed devices in PME handling (Qiang)
 - support resizable BARs for large graphics devices (Christian König)
 - expose SR-IOV offset, stride, and VF device ID via sysfs (Filippo Sironi)
 - create SR-IOV virtfn/physfn sysfs links before attaching driver (Stuart Hayes)
 - fix SR-IOV "ARI Capable Hierarchy" restore issue (Tony Nguyen)
 - enforce Kconfig IOV/REALLOC dependency (Sascha El-Sharkawy)
 - avoid slot reset if bridge itself is broken (Jan Glauber)
 - clean up pci_reset_function() path (Jan H. Schönherr)
 - make pci_map_rom() fail if the option ROM is invalid (Changbin Du)
 - convert timers to timer_setup() (Kees Cook)
 - move PCI_QUIRKS to PCI bus Kconfig menu (Randy Dunlap)
 - constify pci_dev_type and intel_mid_pci_ops (Bhumika Goyal)
 - remove unnecessary pci_dev, pci_bus, resource, pcibios_set_master() declarations (Bjorn Helgaas)
 - fix endpoint framework overflows and BUG()s (Dan Carpenter)
 - fix endpoint framework issues (Kishon Vijay Abraham I)
 - avoid broken Cavium CN8xxx bus reset behavior (David Daney)
 - extend Cavium ACS capability quirks (Vadim Lomovtsev)
 - support Synopsys DesignWare RC in ECAM mode (Ard Biesheuvel)
 - turn off dra7xx clocks cleanly on shutdown (Keerthy)
 - fix Faraday probe error path (Wei Yongjun)
 - support HiSilicon STB SoC PCIe host controller (Jianguo Sun)
 - fix Hyper-V interrupt affinity issue (Dexuan Cui)
 - remove useless ACPI warning for Hyper-V pass-through devices (Vitaly Kuznetsov)
 - support multiple MSI on iProc (Sandor Bodo-Merle)
 - support Layerscape LS1012a and LS1046a PCIe host controllers (Hou Zhiqiang)
 - fix Layerscape default error response (Minghuan Lian)
 - support MSI on Tango host controller (Marc Gonzalez)
 - support Tegra186 PCIe host controller (Manikanta Maddireddy)
 - use generic accessors on Tegra when possible (Thierry Reding)
 - support V3 Semiconductor PCI host controller (Linus Walleij)

* tag 'pci-v4.15-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (85 commits)
  PCI/ASPM: Add L1 Substates definitions
  PCI/ASPM: Reformat ASPM register definitions
  PCI/ASPM: Use correct capability pointer to program LTR_L1.2_THRESHOLD
  PCI/ASPM: Account for downstream device's Port Common_Mode_Restore_Time
  PCI: xgene: Rename xgene_pcie_probe_bridge() to xgene_pcie_probe()
  PCI: xilinx: Rename xilinx_pcie_link_is_up() to xilinx_pcie_link_up()
  PCI: altera: Rename altera_pcie_link_is_up() to altera_pcie_link_up()
  PCI: Fix kernel-doc build warning
  PCI: Fail pci_map_rom() if the option ROM is invalid
  PCI: Move pci_map_rom() error path
  PCI: Move PCI_QUIRKS to the PCI bus menu
  alpha/PCI: Make pdev_save_srm_config() static
  PCI: Remove unused declarations
  PCI: Remove redundant pci_dev, pci_bus, resource declarations
  PCI: Remove redundant pcibios_set_master() declarations
  PCI/PME: Handle invalid data when reading Root Status
  PCI: hv: Use effective affinity mask
  PCI: pciehp: Do not clear Presence Detect Changed during initialization
  PCI: pciehp: Fix race condition handling surprise link down
  PCI: Distribute available resources to hotplug-capable bridges
  ...
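One of the items above, "expose SR-IOV offset, stride, and VF device ID via sysfs", can be exercised from userspace once a kernel with this series is running. The following is a minimal, hypothetical C sketch: the attribute names (sriov_offset, sriov_stride, sriov_vf_device) and the example PF address 0000:03:00.0 are assumptions for illustration and are not taken verbatim from this diff.

/*
 * Hypothetical userspace sketch: read per-PF SR-IOV attributes from sysfs.
 * Attribute names and the PF address below are illustrative assumptions.
 */
#include <stdio.h>
#include <string.h>

static int read_attr(const char *bdf, const char *attr, char *buf, size_t len)
{
	char path[256];
	FILE *f;

	/* Build the sysfs path for this PF and attribute */
	snprintf(path, sizeof(path), "/sys/bus/pci/devices/%s/%s", bdf, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, (int)len, f)) {
		fclose(f);
		return -1;
	}
	/* Strip the trailing newline that sysfs reads usually carry */
	buf[strcspn(buf, "\n")] = '\0';
	fclose(f);
	return 0;
}

int main(void)
{
	const char *bdf = "0000:03:00.0";	/* hypothetical PF address */
	const char *attrs[] = { "sriov_offset", "sriov_stride", "sriov_vf_device" };
	char val[64];
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		if (read_attr(bdf, attrs[i], val, sizeof(val)) == 0)
			printf("%s: %s\n", attrs[i], val);
		else
			printf("%s: not available\n", attrs[i]);
	}
	return 0;
}
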
Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Kconfig | 17
-rw-r--r--  drivers/pci/Makefile | 3
-rw-r--r--  drivers/pci/dwc/Kconfig | 10
-rw-r--r--  drivers/pci/dwc/Makefile | 1
-rw-r--r--  drivers/pci/dwc/pci-dra7xx.c | 17
-rw-r--r--  drivers/pci/dwc/pci-layerscape.c | 12
-rw-r--r--  drivers/pci/dwc/pcie-histb.c | 470
-rw-r--r--  drivers/pci/host/Kconfig | 6
-rw-r--r--  drivers/pci/host/Makefile | 1
-rw-r--r--  drivers/pci/host/pci-ftpci100.c | 22
-rw-r--r--  drivers/pci/host/pci-host-generic.c | 43
-rw-r--r--  drivers/pci/host/pci-hyperv.c | 8
-rw-r--r--  drivers/pci/host/pci-rcar-gen2.c | 20
-rw-r--r--  drivers/pci/host/pci-tegra.c | 158
-rw-r--r--  drivers/pci/host/pci-v3-semi.c | 959
-rw-r--r--  drivers/pci/host/pci-xgene.c | 24
-rw-r--r--  drivers/pci/host/pcie-altera.c | 8
-rw-r--r--  drivers/pci/host/pcie-iproc-msi.c | 19
-rw-r--r--  drivers/pci/host/pcie-iproc.c | 20
-rw-r--r--  drivers/pci/host/pcie-rcar.c | 20
-rw-r--r--  drivers/pci/host/pcie-tango.c | 205
-rw-r--r--  drivers/pci/host/pcie-xilinx.c | 6
-rw-r--r--  drivers/pci/hotplug-pci.c | 29
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 15
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_pci.c | 7
-rw-r--r--  drivers/pci/hotplug/cpqphp.h | 2
-rw-r--r--  drivers/pci/hotplug/cpqphp_core.c | 3
-rw-r--r--  drivers/pci/hotplug/cpqphp_ctrl.c | 19
-rw-r--r--  drivers/pci/hotplug/ibmphp_pci.c | 19
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c | 7
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 25
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c | 11
-rw-r--r--  drivers/pci/hotplug/shpchp_hpc.c | 9
-rw-r--r--  drivers/pci/hotplug/shpchp_pci.c | 6
-rw-r--r--  drivers/pci/iov.c | 34
-rw-r--r--  drivers/pci/pci-acpi.c | 2
-rw-r--r--  drivers/pci/pci-sysfs.c | 35
-rw-r--r--  drivers/pci/pci.c | 154
-rw-r--r--  drivers/pci/pci.h | 11
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 9
-rw-r--r--  drivers/pci/pcie/aspm.c | 46
-rw-r--r--  drivers/pci/pcie/pme.c | 5
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 171
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 1
-rw-r--r--  drivers/pci/probe.c | 187
-rw-r--r--  drivers/pci/quirks.c | 42
-rw-r--r--  drivers/pci/remove.c | 2
-rw-r--r--  drivers/pci/rom.c | 19
-rw-r--r--  drivers/pci/setup-bus.c | 299
-rw-r--r--  drivers/pci/setup-res.c | 58
-rw-r--r--  drivers/pci/switch/switchtec.c | 2
51 files changed, 2825 insertions, 453 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index c32a77fc8b03..90944667ccea 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -29,6 +29,15 @@ config PCI_MSI_IRQ_DOMAIN
depends on PCI_MSI
select GENERIC_MSI_IRQ_DOMAIN
+config PCI_QUIRKS
+ default y
+ bool "Enable PCI quirk workarounds" if EXPERT
+ depends on PCI
+ help
+ This enables workarounds for various PCI chipset bugs/quirks.
+ Disable this only if your target machine is unaffected by PCI
+ quirks.
+
config PCI_DEBUG
bool "PCI Debugging"
depends on PCI && DEBUG_KERNEL
@@ -42,13 +51,13 @@ config PCI_DEBUG
config PCI_REALLOC_ENABLE_AUTO
bool "Enable PCI resource re-allocation detection"
depends on PCI
+ depends on PCI_IOV
help
Say Y here if you want the PCI core to detect if PCI resource
re-allocation needs to be enabled. You can always use pci=realloc=on
- or pci=realloc=off to override it. Note this feature is a no-op
- unless PCI_IOV support is also enabled; in that case it will
- automatically re-allocate PCI resources if SR-IOV BARs have not
- been allocated by the BIOS.
+ or pci=realloc=off to override it. It will automatically
+ re-allocate PCI resources if SR-IOV BARs have not been allocated by
+ the BIOS.
When in doubt, say N.
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 80adbdbcecce..3d5e047f0a32 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -17,9 +17,6 @@ obj-$(CONFIG_PCIEPORTBUS) += pcie/
# Build the PCI Hotplug drivers if we were asked to
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
-ifdef CONFIG_HOTPLUG_PCI
-obj-y += hotplug-pci.o
-endif
# Build the PCI MSI interrupt support
obj-$(CONFIG_PCI_MSI) += msi.o
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index 22ec82fcdea2..113e09440f85 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -169,4 +169,14 @@ config PCIE_KIRIN
Say Y here if you want PCIe controller support
on HiSilicon Kirin series SoCs.
+config PCIE_HISI_STB
+ bool "HiSilicon STB SoCs PCIe controllers"
+ depends on ARCH_HISI
+ depends on PCI
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIEPORTBUS
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support on HiSilicon STB SoCs
+
endmenu
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile
index e73661182da0..41ba499c96ee 100644
--- a/drivers/pci/dwc/Makefile
+++ b/drivers/pci/dwc/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
+obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index 362607f727ee..e77a4ceed74c 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -810,6 +810,22 @@ static int dra7xx_pcie_resume_noirq(struct device *dev)
}
#endif
+void dra7xx_pcie_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ int ret;
+
+ dra7xx_pcie_stop_link(dra7xx->pci);
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+ pm_runtime_disable(dev);
+ dra7xx_pcie_disable_phy(dra7xx);
+}
+
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
@@ -823,5 +839,6 @@ static struct platform_driver dra7xx_pcie_driver = {
.suppress_bind_attrs = true,
.pm = &dra7xx_pcie_pm_ops,
},
+ .shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index 87fa486bee2c..8f34c2fdc600 100644
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -33,6 +33,8 @@
/* PEX Internal Configuration Registers */
#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
+#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
+#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */
#define PCIE_IATU_NUM 6
@@ -124,6 +126,14 @@ static int ls_pcie_link_up(struct dw_pcie *pci)
return 1;
}
+/* Forward error response of outbound non-posted requests */
+static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
+}
+
static int ls_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -135,6 +145,7 @@ static int ls_pcie_host_init(struct pcie_port *pp)
* dw_pcie_setup_rc() will reconfigure the outbound windows.
*/
ls_pcie_disable_outbound_atus(pcie);
+ ls_pcie_fix_error_response(pcie);
dw_pcie_dbi_ro_wr_en(pci);
ls_pcie_clear_multifunction(pcie);
@@ -253,6 +264,7 @@ static struct ls_pcie_drvdata ls2088_drvdata = {
};
static const struct of_device_id ls_pcie_of_match[] = {
+ { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
diff --git a/drivers/pci/dwc/pcie-histb.c b/drivers/pci/dwc/pcie-histb.c
new file mode 100644
index 000000000000..33b01b734d7d
--- /dev/null
+++ b/drivers/pci/dwc/pcie-histb.c
@@ -0,0 +1,470 @@
+/*
+ * PCIe host controller driver for HiSilicon STB SoCs
+ *
+ * Copyright (C) 2016-2017 HiSilicon Co., Ltd. http://www.hisilicon.com
+ *
+ * Authors: Ruqiang Ju <juruqiang@hisilicon.com>
+ * Jianguo Sun <sunjianguo1@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define to_histb_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_SYS_CTRL0 0x0000
+#define PCIE_SYS_CTRL1 0x0004
+#define PCIE_SYS_CTRL7 0x001C
+#define PCIE_SYS_CTRL13 0x0034
+#define PCIE_SYS_CTRL15 0x003C
+#define PCIE_SYS_CTRL16 0x0040
+#define PCIE_SYS_CTRL17 0x0044
+
+#define PCIE_SYS_STAT0 0x0100
+#define PCIE_SYS_STAT4 0x0110
+
+#define PCIE_RDLH_LINK_UP BIT(5)
+#define PCIE_XMLH_LINK_UP BIT(15)
+#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
+#define PCIE_APP_LTSSM_ENABLE BIT(11)
+
+#define PCIE_DEVICE_TYPE_MASK GENMASK(31, 28)
+#define PCIE_WM_EP 0
+#define PCIE_WM_LEGACY BIT(1)
+#define PCIE_WM_RC BIT(30)
+
+#define PCIE_LTSSM_STATE_MASK GENMASK(5, 0)
+#define PCIE_LTSSM_STATE_ACTIVE 0x11
+
+struct histb_pcie {
+ struct dw_pcie *pci;
+ struct clk *aux_clk;
+ struct clk *pipe_clk;
+ struct clk *sys_clk;
+ struct clk *bus_clk;
+ struct phy *phy;
+ struct reset_control *soft_reset;
+ struct reset_control *sys_reset;
+ struct reset_control *bus_reset;
+ void __iomem *ctrl;
+ int reset_gpio;
+};
+
+static u32 histb_pcie_readl(struct histb_pcie *histb_pcie, u32 reg)
+{
+ return readl(histb_pcie->ctrl + reg);
+}
+
+static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val)
+{
+ writel(val, histb_pcie->ctrl + reg);
+}
+
+static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 val;
+
+ val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ if (enable)
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val);
+}
+
+static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 val;
+
+ val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL1);
+ if (enable)
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL1, val);
+}
+
+static u32 histb_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
+{
+ u32 val;
+
+ histb_pcie_dbi_r_mode(&pci->pp, true);
+ dw_pcie_read(base + reg, size, &val);
+ histb_pcie_dbi_r_mode(&pci->pp, false);
+
+ return val;
+}
+
+static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ histb_pcie_dbi_w_mode(&pci->pp, true);
+ dw_pcie_write(base + reg, size, val);
+ histb_pcie_dbi_w_mode(&pci->pp, false);
+}
+
+static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where,
+ int size, u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
+
+ histb_pcie_dbi_r_mode(pp, true);
+ ret = dw_pcie_read(pci->dbi_base + where, size, val);
+ histb_pcie_dbi_r_mode(pp, false);
+
+ return ret;
+}
+
+static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where,
+ int size, u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
+
+ histb_pcie_dbi_w_mode(pp, true);
+ ret = dw_pcie_write(pci->dbi_base + where, size, val);
+ histb_pcie_dbi_w_mode(pp, false);
+
+ return ret;
+}
+
+static int histb_pcie_link_up(struct dw_pcie *pci)
+{
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+ u32 status;
+
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0);
+ status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4);
+ status &= PCIE_LTSSM_STATE_MASK;
+ if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
+ (status == PCIE_LTSSM_STATE_ACTIVE))
+ return 1;
+
+ return 0;
+}
+
+static int histb_pcie_establish_link(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+
+ if (dw_pcie_link_up(pci)) {
+ dev_info(pci->dev, "Link already up\n");
+ return 0;
+ }
+
+ /* PCIe RC work mode */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ regval &= ~PCIE_DEVICE_TYPE_MASK;
+ regval |= PCIE_WM_RC;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
+
+ /* setup root complex */
+ dw_pcie_setup_rc(pp);
+
+ /* assert LTSSM enable */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7);
+ regval |= PCIE_APP_LTSSM_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval);
+
+ return dw_pcie_wait_for_link(pci);
+}
+
+static int histb_pcie_host_init(struct pcie_port *pp)
+{
+ histb_pcie_establish_link(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+
+ return 0;
+}
+
+static struct dw_pcie_host_ops histb_pcie_host_ops = {
+ .rd_own_conf = histb_pcie_rd_own_conf,
+ .wr_own_conf = histb_pcie_wr_own_conf,
+ .host_init = histb_pcie_host_init,
+};
+
+static irqreturn_t histb_pcie_msi_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ return dw_handle_msi_irq(pp);
+}
+
+static void histb_pcie_host_disable(struct histb_pcie *hipcie)
+{
+ reset_control_assert(hipcie->soft_reset);
+ reset_control_assert(hipcie->sys_reset);
+ reset_control_assert(hipcie->bus_reset);
+
+ clk_disable_unprepare(hipcie->aux_clk);
+ clk_disable_unprepare(hipcie->pipe_clk);
+ clk_disable_unprepare(hipcie->sys_clk);
+ clk_disable_unprepare(hipcie->bus_clk);
+
+ if (gpio_is_valid(hipcie->reset_gpio))
+ gpio_set_value_cansleep(hipcie->reset_gpio, 0);
+}
+
+static int histb_pcie_host_enable(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ struct device *dev = pci->dev;
+ int ret;
+
+ /* power on the PCIe device, if one is present */
+ if (gpio_is_valid(hipcie->reset_gpio))
+ gpio_set_value_cansleep(hipcie->reset_gpio, 1);
+
+ ret = clk_prepare_enable(hipcie->bus_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable bus clk\n");
+ goto err_bus_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->sys_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable sys clk\n");
+ goto err_sys_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clk\n");
+ goto err_pipe_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clk\n");
+ goto err_aux_clk;
+ }
+
+ reset_control_assert(hipcie->soft_reset);
+ reset_control_deassert(hipcie->soft_reset);
+
+ reset_control_assert(hipcie->sys_reset);
+ reset_control_deassert(hipcie->sys_reset);
+
+ reset_control_assert(hipcie->bus_reset);
+ reset_control_deassert(hipcie->bus_reset);
+
+ return 0;
+
+err_aux_clk:
+ clk_disable_unprepare(hipcie->aux_clk);
+err_pipe_clk:
+ clk_disable_unprepare(hipcie->pipe_clk);
+err_sys_clk:
+ clk_disable_unprepare(hipcie->sys_clk);
+err_bus_clk:
+ clk_disable_unprepare(hipcie->bus_clk);
+
+ return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .read_dbi = histb_pcie_read_dbi,
+ .write_dbi = histb_pcie_write_dbi,
+ .link_up = histb_pcie_link_up,
+};
+
+static int histb_pcie_probe(struct platform_device *pdev)
+{
+ struct histb_pcie *hipcie;
+ struct dw_pcie *pci;
+ struct pcie_port *pp;
+ struct resource *res;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ enum of_gpio_flags of_flags;
+ unsigned long flag = GPIOF_DIR_OUT;
+ int ret;
+
+ hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL);
+ if (!hipcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ hipcie->pci = pci;
+ pp = &pci->pp;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
+ hipcie->ctrl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hipcie->ctrl)) {
+ dev_err(dev, "cannot get control reg base\n");
+ return PTR_ERR(hipcie->ctrl);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi");
+ pci->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base)) {
+ dev_err(dev, "cannot get rc-dbi base\n");
+ return PTR_ERR(pci->dbi_base);
+ }
+
+ hipcie->reset_gpio = of_get_named_gpio_flags(np,
+ "reset-gpios", 0, &of_flags);
+ if (of_flags & OF_GPIO_ACTIVE_LOW)
+ flag |= GPIOF_ACTIVE_LOW;
+ if (gpio_is_valid(hipcie->reset_gpio)) {
+ ret = devm_gpio_request_one(dev, hipcie->reset_gpio,
+ flag, "PCIe device power control");
+ if (ret) {
+ dev_err(dev, "unable to request gpio\n");
+ return ret;
+ }
+ }
+
+ hipcie->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(hipcie->aux_clk)) {
+ dev_err(dev, "Failed to get PCIe aux clk\n");
+ return PTR_ERR(hipcie->aux_clk);
+ }
+
+ hipcie->pipe_clk = devm_clk_get(dev, "pipe");
+ if (IS_ERR(hipcie->pipe_clk)) {
+ dev_err(dev, "Failed to get PCIe pipe clk\n");
+ return PTR_ERR(hipcie->pipe_clk);
+ }
+
+ hipcie->sys_clk = devm_clk_get(dev, "sys");
+ if (IS_ERR(hipcie->sys_clk)) {
+ dev_err(dev, "Failed to get PCIEe sys clk\n");
+ return PTR_ERR(hipcie->sys_clk);
+ }
+
+ hipcie->bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(hipcie->bus_clk)) {
+ dev_err(dev, "Failed to get PCIe bus clk\n");
+ return PTR_ERR(hipcie->bus_clk);
+ }
+
+ hipcie->soft_reset = devm_reset_control_get(dev, "soft");
+ if (IS_ERR(hipcie->soft_reset)) {
+ dev_err(dev, "couldn't get soft reset\n");
+ return PTR_ERR(hipcie->soft_reset);
+ }
+
+ hipcie->sys_reset = devm_reset_control_get(dev, "sys");
+ if (IS_ERR(hipcie->sys_reset)) {
+ dev_err(dev, "couldn't get sys reset\n");
+ return PTR_ERR(hipcie->sys_reset);
+ }
+
+ hipcie->bus_reset = devm_reset_control_get(dev, "bus");
+ if (IS_ERR(hipcie->bus_reset)) {
+ dev_err(dev, "couldn't get bus reset\n");
+ return PTR_ERR(hipcie->bus_reset);
+ }
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+ if (pp->msi_irq < 0) {
+ dev_err(dev, "Failed to get MSI IRQ\n");
+ return pp->msi_irq;
+ }
+
+ ret = devm_request_irq(dev, pp->msi_irq,
+ histb_pcie_msi_irq_handler,
+ IRQF_SHARED, "histb-pcie-msi", pp);
+ if (ret) {
+ dev_err(dev, "cannot request MSI IRQ\n");
+ return ret;
+ }
+ }
+
+ hipcie->phy = devm_phy_get(dev, "phy");
+ if (IS_ERR(hipcie->phy)) {
+ dev_info(dev, "no pcie-phy found\n");
+ hipcie->phy = NULL;
+ /*
+ * Fall through: if no pcie-phy is found, PHY init is
+ * expected to have been done by the bootloader.
+ */
+ } else {
+ phy_init(hipcie->phy);
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &histb_pcie_host_ops;
+
+ platform_set_drvdata(pdev, hipcie);
+
+ ret = histb_pcie_host_enable(pp);
+ if (ret) {
+ dev_err(dev, "failed to enable host\n");
+ return ret;
+ }
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int histb_pcie_remove(struct platform_device *pdev)
+{
+ struct histb_pcie *hipcie = platform_get_drvdata(pdev);
+
+ histb_pcie_host_disable(hipcie);
+
+ if (hipcie->phy)
+ phy_exit(hipcie->phy);
+
+ return 0;
+}
+
+static const struct of_device_id histb_pcie_of_match[] = {
+ { .compatible = "hisilicon,hi3798cv200-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, histb_pcie_of_match);
+
+static struct platform_driver histb_pcie_platform_driver = {
+ .probe = histb_pcie_probe,
+ .remove = histb_pcie_remove,
+ .driver = {
+ .name = "histb-pcie",
+ .of_match_table = histb_pcie_of_match,
+ },
+};
+module_platform_driver(histb_pcie_platform_driver);
+
+MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index b868803792d8..38d12980db0f 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -95,6 +95,12 @@ config PCI_XGENE_MSI
Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC.
+config PCI_V3_SEMI
+ bool "V3 Semiconductor PCI controller"
+ depends on OF
+ depends on ARM
+ default ARCH_INTEGRATOR_AP
+
config PCI_VERSATILE
bool "ARM Versatile PB PCI controller"
depends on ARCH_VERSATILE
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 95f5b80ca52a..34ec1d88f961 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
+obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
index 4ea7d2ebcc5c..b9617d1c1d48 100644
--- a/drivers/pci/host/pci-ftpci100.c
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -371,24 +371,6 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
return 0;
}
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
- parser->end = parser->range + rlen / sizeof(__be32);
-
- return 0;
-}
-
static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
struct device_node *np)
{
@@ -403,7 +385,7 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
int i = 0;
u32 val;
- if (pci_dma_range_parser_init(&parser, np)) {
+ if (of_pci_dma_range_parser_init(&parser, np)) {
dev_err(dev, "missing dma-ranges property\n");
return -EINVAL;
}
@@ -482,7 +464,7 @@ static int faraday_pci_probe(struct platform_device *pdev)
}
p->bus_clk = devm_clk_get(dev, "PCICLK");
if (IS_ERR(p->bus_clk))
- return PTR_ERR(clk);
+ return PTR_ERR(p->bus_clk);
ret = clk_prepare_enable(p->bus_clk);
if (ret) {
dev_err(dev, "could not prepare PCICLK\n");
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 7d709a7e0aa8..2f05511ce718 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -35,6 +35,40 @@ static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
}
};
+static bool pci_dw_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+
+ /*
+ * The Synopsys DesignWare PCIe controller in ECAM mode will not filter
+ * type 0 config TLPs sent to devices 1 and up on its downstream port,
+ * resulting in devices appearing multiple times on bus 0 unless we
+ * filter out those accesses here.
+ */
+ if (bus->number == cfg->busr.start && PCI_SLOT(devfn) > 0)
+ return false;
+
+ return true;
+}
+
+static void __iomem *pci_dw_ecam_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ if (!pci_dw_valid_device(bus, devfn))
+ return NULL;
+
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static struct pci_ecam_ops pci_dw_ecam_bus_ops = {
+ .bus_shift = 20,
+ .pci_ops = {
+ .map_bus = pci_dw_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
static const struct of_device_id gen_pci_of_match[] = {
{ .compatible = "pci-host-cam-generic",
.data = &gen_pci_cfg_cam_bus_ops },
@@ -42,6 +76,15 @@ static const struct of_device_id gen_pci_of_match[] = {
{ .compatible = "pci-host-ecam-generic",
.data = &pci_generic_ecam_ops },
+ { .compatible = "marvell,armada8k-pcie-ecam",
+ .data = &pci_dw_ecam_bus_ops },
+
+ { .compatible = "socionext,synquacer-pcie-ecam",
+ .data = &pci_dw_ecam_bus_ops },
+
+ { .compatible = "snps,dw-pcie-ecam",
+ .data = &pci_dw_ecam_bus_ops },
+
{ },
};
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 0fe3ea164ee5..04dac6a42c9f 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -879,7 +879,7 @@ static void hv_irq_unmask(struct irq_data *data)
int cpu;
u64 res;
- dest = irq_data_get_affinity_mask(data);
+ dest = irq_data_get_effective_affinity_mask(data);
pdev = msi_desc_to_pci_dev(msi_desc);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
@@ -1042,6 +1042,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct hv_pci_dev *hpdev;
struct pci_bus *pbus;
struct pci_dev *pdev;
+ struct cpumask *dest;
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
struct {
@@ -1056,6 +1057,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
int ret;
pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
+ dest = irq_data_get_effective_affinity_mask(data);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
@@ -1081,14 +1083,14 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
switch (pci_protocol_version) {
case PCI_PROTOCOL_VERSION_1_1:
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
- irq_data_get_affinity_mask(data),
+ dest,
hpdev->desc.win_slot.slot,
cfg->vector);
break;
case PCI_PROTOCOL_VERSION_1_2:
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
- irq_data_get_affinity_mask(data),
+ dest,
hpdev->desc.win_slot.slot,
cfg->vector);
break;
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 6f879685fedd..e46de69f0380 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -293,24 +293,6 @@ static struct pci_ops rcar_pci_ops = {
.write = pci_generic_config_write,
};
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
-
- parser->end = parser->range + rlen / sizeof(__be32);
- return 0;
-}
-
static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
struct device_node *np)
{
@@ -320,7 +302,7 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
int index = 0;
/* Failure to parse is ok as we fall back to defaults */
- if (pci_dma_range_parser_init(&parser, np))
+ if (of_pci_dma_range_parser_init(&parser, np))
return 0;
/* Get the dma-ranges from DT */
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 1987fec1f126..f9d3960dc39f 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -159,10 +159,13 @@
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401 (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20)
#define AFI_FUSE 0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
@@ -253,6 +256,7 @@ struct tegra_pcie_soc {
bool has_cml_clk;
bool has_gen2;
bool force_pca_enable;
+ bool program_uphy;
};
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
@@ -492,12 +496,32 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
return addr;
}
+static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ if (bus->number == 0)
+ return pci_generic_config_read32(bus, devfn, where, size,
+ value);
+
+ return pci_generic_config_read(bus, devfn, where, size, value);
+}
+
+static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (bus->number == 0)
+ return pci_generic_config_write32(bus, devfn, where, size,
+ value);
+
+ return pci_generic_config_write(bus, devfn, where, size, value);
+}
+
static struct pci_ops tegra_pcie_ops = {
.add_bus = tegra_pcie_add_bus,
.remove_bus = tegra_pcie_remove_bus,
.map_bus = tegra_pcie_map_bus,
- .read = pci_generic_config_read32,
- .write = pci_generic_config_write32,
+ .read = tegra_pcie_config_read,
+ .write = tegra_pcie_config_write,
};
static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
@@ -1013,10 +1037,12 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
afi_writel(pcie, value, AFI_FUSE);
}
- err = tegra_pcie_phy_power_on(pcie);
- if (err < 0) {
- dev_err(dev, "failed to power on PHY(s): %d\n", err);
- return err;
+ if (soc->program_uphy) {
+ err = tegra_pcie_phy_power_on(pcie);
+ if (err < 0) {
+ dev_err(dev, "failed to power on PHY(s): %d\n", err);
+ return err;
+ }
}
/* take the PCIe interface module out of reset */
@@ -1049,19 +1075,23 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
int err;
/* TODO: disable and unprepare clocks? */
- err = tegra_pcie_phy_power_off(pcie);
- if (err < 0)
- dev_err(dev, "failed to power off PHY(s): %d\n", err);
+ if (soc->program_uphy) {
+ err = tegra_pcie_phy_power_off(pcie);
+ if (err < 0)
+ dev_err(dev, "failed to power off PHY(s): %d\n", err);
+ }
reset_control_assert(pcie->pcie_xrst);
reset_control_assert(pcie->afi_rst);
reset_control_assert(pcie->pex_rst);
- tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+ if (!dev->pm_domain)
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
if (err < 0)
@@ -1078,19 +1108,29 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
reset_control_assert(pcie->afi_rst);
reset_control_assert(pcie->pex_rst);
- tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+ if (!dev->pm_domain)
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
/* enable regulators */
err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
if (err < 0)
dev_err(dev, "failed to enable regulators: %d\n", err);
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
- pcie->pex_clk,
- pcie->pex_rst);
- if (err) {
- dev_err(dev, "powerup sequence failed: %d\n", err);
- return err;
+ if (dev->pm_domain) {
+ err = clk_prepare_enable(pcie->pex_clk);
+ if (err) {
+ dev_err(dev, "failed to enable PEX clock: %d\n", err);
+ return err;
+ }
+ reset_control_deassert(pcie->pex_rst);
+ } else {
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
+ pcie->pex_clk,
+ pcie->pex_rst);
+ if (err) {
+ dev_err(dev, "powerup sequence failed: %d\n", err);
+ return err;
+ }
}
reset_control_deassert(pcie->afi_rst);
@@ -1263,6 +1303,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *pads, *afi, *res;
+ const struct tegra_pcie_soc *soc = pcie->soc;
int err;
err = tegra_pcie_clocks_get(pcie);
@@ -1277,10 +1318,12 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
return err;
}
- err = tegra_pcie_phys_get(pcie);
- if (err < 0) {
- dev_err(dev, "failed to get PHYs: %d\n", err);
- return err;
+ if (soc->program_uphy) {
+ err = tegra_pcie_phys_get(pcie);
+ if (err < 0) {
+ dev_err(dev, "failed to get PHYs: %d\n", err);
+ return err;
+ }
}
err = tegra_pcie_power_on(pcie);
@@ -1342,6 +1385,7 @@ poweroff:
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
int err;
if (pcie->irq > 0)
@@ -1349,9 +1393,11 @@ static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
tegra_pcie_power_off(pcie);
- err = phy_exit(pcie->phy);
- if (err < 0)
- dev_err(dev, "failed to teardown PHY: %d\n", err);
+ if (soc->program_uphy) {
+ err = phy_exit(pcie->phy);
+ if (err < 0)
+ dev_err(dev, "failed to teardown PHY: %d\n", err);
+ }
return 0;
}
@@ -1606,8 +1652,32 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
- if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
- of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
+ if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
+ switch (lanes) {
+ case 0x010004:
+ dev_info(dev, "4x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
+ return 0;
+
+ case 0x010102:
+ dev_info(dev, "2x1, 1X1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
+ return 0;
+
+ case 0x010101:
+ dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
+ return 0;
+
+ default:
+ dev_info(dev, "wrong configuration updated in DT, "
+ "switching to default 2x1, 1x1, 1x1 "
+ "configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
+ return 0;
+ }
+ } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
+ of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
switch (lanes) {
case 0x0000104:
dev_info(dev, "4x1, 1x1 configuration\n");
@@ -1727,7 +1797,20 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
struct device_node *np = dev->of_node;
unsigned int i = 0;
- if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
+ if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
+ pcie->num_supplies = 4;
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "dvdd-pex";
+ pcie->supplies[i++].supply = "hvdd-pex-pll";
+ pcie->supplies[i++].supply = "hvdd-pex";
+ pcie->supplies[i++].supply = "vddio-pexctl-aud";
+ } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
pcie->num_supplies = 6;
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
@@ -2066,6 +2149,7 @@ static const struct tegra_pcie_soc tegra20_pcie = {
.has_cml_clk = false,
.has_gen2 = false,
.force_pca_enable = false,
+ .program_uphy = true,
};
static const struct tegra_pcie_soc tegra30_pcie = {
@@ -2081,6 +2165,7 @@ static const struct tegra_pcie_soc tegra30_pcie = {
.has_cml_clk = true,
.has_gen2 = false,
.force_pca_enable = false,
+ .program_uphy = true,
};
static const struct tegra_pcie_soc tegra124_pcie = {
@@ -2095,6 +2180,7 @@ static const struct tegra_pcie_soc tegra124_pcie = {
.has_cml_clk = true,
.has_gen2 = true,
.force_pca_enable = false,
+ .program_uphy = true,
};
static const struct tegra_pcie_soc tegra210_pcie = {
@@ -2109,9 +2195,27 @@ static const struct tegra_pcie_soc tegra210_pcie = {
.has_cml_clk = true,
.has_gen2 = true,
.force_pca_enable = true,
+ .program_uphy = true,
+};
+
+static const struct tegra_pcie_soc tegra186_pcie = {
+ .num_ports = 3,
+ .msi_base_shift = 8,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0x80b880b8,
+ .pads_refclk_cfg1 = 0x000480b8,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+ .has_cml_clk = false,
+ .has_gen2 = true,
+ .force_pca_enable = false,
+ .program_uphy = false,
};
static const struct of_device_id tegra_pcie_of_match[] = {
+ { .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
diff --git a/drivers/pci/host/pci-v3-semi.c b/drivers/pci/host/pci-v3-semi.c
new file mode 100644
index 000000000000..02f6e1e3a421
--- /dev/null
+++ b/drivers/pci/host/pci-v3-semi.c
@@ -0,0 +1,959 @@
+/*
+ * Support for V3 Semiconductor PCI Local Bus to PCI Bridge
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the code from arch/arm/mach-integrator/pci_v3.c
+ * Copyright (C) 1999 ARM Limited
+ * Copyright (C) 2000-2001 Deep Blue Solutions Ltd
+ *
+ * Contributors to the old driver include:
+ * Russell King <linux@armlinux.org.uk>
+ * David A. Rusling <david.rusling@linaro.org> (uHAL, ARM Firmware suite)
+ * Rob Herring <robh@kernel.org>
+ * Liviu Dudau <Liviu.Dudau@arm.com>
+ * Grant Likely <grant.likely@secretlab.ca>
+ * Arnd Bergmann <arnd@arndb.de>
+ * Bjorn Helgaas <bhelgaas@google.com>
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+
+#define V3_PCI_VENDOR 0x00000000
+#define V3_PCI_DEVICE 0x00000002
+#define V3_PCI_CMD 0x00000004
+#define V3_PCI_STAT 0x00000006
+#define V3_PCI_CC_REV 0x00000008
+#define V3_PCI_HDR_CFG 0x0000000C
+#define V3_PCI_IO_BASE 0x00000010
+#define V3_PCI_BASE0 0x00000014
+#define V3_PCI_BASE1 0x00000018
+#define V3_PCI_SUB_VENDOR 0x0000002C
+#define V3_PCI_SUB_ID 0x0000002E
+#define V3_PCI_ROM 0x00000030
+#define V3_PCI_BPARAM 0x0000003C
+#define V3_PCI_MAP0 0x00000040
+#define V3_PCI_MAP1 0x00000044
+#define V3_PCI_INT_STAT 0x00000048
+#define V3_PCI_INT_CFG 0x0000004C
+#define V3_LB_BASE0 0x00000054
+#define V3_LB_BASE1 0x00000058
+#define V3_LB_MAP0 0x0000005E
+#define V3_LB_MAP1 0x00000062
+#define V3_LB_BASE2 0x00000064
+#define V3_LB_MAP2 0x00000066
+#define V3_LB_SIZE 0x00000068
+#define V3_LB_IO_BASE 0x0000006E
+#define V3_FIFO_CFG 0x00000070
+#define V3_FIFO_PRIORITY 0x00000072
+#define V3_FIFO_STAT 0x00000074
+#define V3_LB_ISTAT 0x00000076
+#define V3_LB_IMASK 0x00000077
+#define V3_SYSTEM 0x00000078
+#define V3_LB_CFG 0x0000007A
+#define V3_PCI_CFG 0x0000007C
+#define V3_DMA_PCI_ADR0 0x00000080
+#define V3_DMA_PCI_ADR1 0x00000090
+#define V3_DMA_LOCAL_ADR0 0x00000084
+#define V3_DMA_LOCAL_ADR1 0x00000094
+#define V3_DMA_LENGTH0 0x00000088
+#define V3_DMA_LENGTH1 0x00000098
+#define V3_DMA_CSR0 0x0000008B
+#define V3_DMA_CSR1 0x0000009B
+#define V3_DMA_CTLB_ADR0 0x0000008C
+#define V3_DMA_CTLB_ADR1 0x0000009C
+#define V3_DMA_DELAY 0x000000E0
+#define V3_MAIL_DATA 0x000000C0
+#define V3_PCI_MAIL_IEWR 0x000000D0
+#define V3_PCI_MAIL_IERD 0x000000D2
+#define V3_LB_MAIL_IEWR 0x000000D4
+#define V3_LB_MAIL_IERD 0x000000D6
+#define V3_MAIL_WR_STAT 0x000000D8
+#define V3_MAIL_RD_STAT 0x000000DA
+#define V3_QBA_MAP 0x000000DC
+
+/* PCI STATUS bits */
+#define V3_PCI_STAT_PAR_ERR BIT(15)
+#define V3_PCI_STAT_SYS_ERR BIT(14)
+#define V3_PCI_STAT_M_ABORT_ERR BIT(13)
+#define V3_PCI_STAT_T_ABORT_ERR BIT(12)
+
+/* LB ISTAT bits */
+#define V3_LB_ISTAT_MAILBOX BIT(7)
+#define V3_LB_ISTAT_PCI_RD BIT(6)
+#define V3_LB_ISTAT_PCI_WR BIT(5)
+#define V3_LB_ISTAT_PCI_INT BIT(4)
+#define V3_LB_ISTAT_PCI_PERR BIT(3)
+#define V3_LB_ISTAT_I2O_QWR BIT(2)
+#define V3_LB_ISTAT_DMA1 BIT(1)
+#define V3_LB_ISTAT_DMA0 BIT(0)
+
+/* PCI COMMAND bits */
+#define V3_COMMAND_M_FBB_EN BIT(9)
+#define V3_COMMAND_M_SERR_EN BIT(8)
+#define V3_COMMAND_M_PAR_EN BIT(6)
+#define V3_COMMAND_M_MASTER_EN BIT(2)
+#define V3_COMMAND_M_MEM_EN BIT(1)
+#define V3_COMMAND_M_IO_EN BIT(0)
+
+/* SYSTEM bits */
+#define V3_SYSTEM_M_RST_OUT BIT(15)
+#define V3_SYSTEM_M_LOCK BIT(14)
+#define V3_SYSTEM_UNLOCK 0xa05f
+
+/* PCI CFG bits */
+#define V3_PCI_CFG_M_I2O_EN BIT(15)
+#define V3_PCI_CFG_M_IO_REG_DIS BIT(14)
+#define V3_PCI_CFG_M_IO_DIS BIT(13)
+#define V3_PCI_CFG_M_EN3V BIT(12)
+#define V3_PCI_CFG_M_RETRY_EN BIT(10)
+#define V3_PCI_CFG_M_AD_LOW1 BIT(9)
+#define V3_PCI_CFG_M_AD_LOW0 BIT(8)
+/*
+ * This is the value applied to C/BE[3:1], with bit 0 always held 0
+ * during DMA access.
+ */
+#define V3_PCI_CFG_M_RTYPE_SHIFT 5
+#define V3_PCI_CFG_M_WTYPE_SHIFT 1
+#define V3_PCI_CFG_TYPE_DEFAULT 0x3
+
+/* PCI BASE bits (PCI -> Local Bus) */
+#define V3_PCI_BASE_M_ADR_BASE 0xFFF00000U
+#define V3_PCI_BASE_M_ADR_BASEL 0x000FFF00U
+#define V3_PCI_BASE_M_PREFETCH BIT(3)
+#define V3_PCI_BASE_M_TYPE (3 << 1)
+#define V3_PCI_BASE_M_IO BIT(0)
+
+/* PCI MAP bits (PCI -> Local bus) */
+#define V3_PCI_MAP_M_MAP_ADR 0xFFF00000U
+#define V3_PCI_MAP_M_RD_POST_INH BIT(15)
+#define V3_PCI_MAP_M_ROM_SIZE (3 << 10)
+#define V3_PCI_MAP_M_SWAP (3 << 8)
+#define V3_PCI_MAP_M_ADR_SIZE 0x000000F0U
+#define V3_PCI_MAP_M_REG_EN BIT(1)
+#define V3_PCI_MAP_M_ENABLE BIT(0)
+
+/* LB_BASE0,1 bits (Local bus -> PCI) */
+#define V3_LB_BASE_ADR_BASE 0xfff00000U
+#define V3_LB_BASE_SWAP (3 << 8)
+#define V3_LB_BASE_ADR_SIZE (15 << 4)
+#define V3_LB_BASE_PREFETCH BIT(3)
+#define V3_LB_BASE_ENABLE BIT(0)
+
+#define V3_LB_BASE_ADR_SIZE_1MB (0 << 4)
+#define V3_LB_BASE_ADR_SIZE_2MB (1 << 4)
+#define V3_LB_BASE_ADR_SIZE_4MB (2 << 4)
+#define V3_LB_BASE_ADR_SIZE_8MB (3 << 4)
+#define V3_LB_BASE_ADR_SIZE_16MB (4 << 4)
+#define V3_LB_BASE_ADR_SIZE_32MB (5 << 4)
+#define V3_LB_BASE_ADR_SIZE_64MB (6 << 4)
+#define V3_LB_BASE_ADR_SIZE_128MB (7 << 4)
+#define V3_LB_BASE_ADR_SIZE_256MB (8 << 4)
+#define V3_LB_BASE_ADR_SIZE_512MB (9 << 4)
+#define V3_LB_BASE_ADR_SIZE_1GB (10 << 4)
+#define V3_LB_BASE_ADR_SIZE_2GB (11 << 4)
+
+#define v3_addr_to_lb_base(a) ((a) & V3_LB_BASE_ADR_BASE)
+
+/* LB_MAP0,1 bits (Local bus -> PCI) */
+#define V3_LB_MAP_MAP_ADR 0xfff0U
+#define V3_LB_MAP_TYPE (7 << 1)
+#define V3_LB_MAP_AD_LOW_EN BIT(0)
+
+#define V3_LB_MAP_TYPE_IACK (0 << 1)
+#define V3_LB_MAP_TYPE_IO (1 << 1)
+#define V3_LB_MAP_TYPE_MEM (3 << 1)
+#define V3_LB_MAP_TYPE_CONFIG (5 << 1)
+#define V3_LB_MAP_TYPE_MEM_MULTIPLE (6 << 1)
+
+#define v3_addr_to_lb_map(a) (((a) >> 16) & V3_LB_MAP_MAP_ADR)
+
+/* LB_BASE2 bits (Local bus -> PCI IO) */
+#define V3_LB_BASE2_ADR_BASE 0xff00U
+#define V3_LB_BASE2_SWAP_AUTO (3 << 6)
+#define V3_LB_BASE2_ENABLE BIT(0)
+
+#define v3_addr_to_lb_base2(a) (((a) >> 16) & V3_LB_BASE2_ADR_BASE)
+
+/* LB_MAP2 bits (Local bus -> PCI IO) */
+#define V3_LB_MAP2_MAP_ADR 0xff00U
+
+#define v3_addr_to_lb_map2(a) (((a) >> 16) & V3_LB_MAP2_MAP_ADR)
+
+/* FIFO priority bits */
+#define V3_FIFO_PRIO_LOCAL BIT(12)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_EOB BIT(10)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 BIT(11)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_ANY (BIT(10)|BIT(11))
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_EOB BIT(8)
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 BIT(9)
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_ANY (BIT(8)|BIT(9))
+#define V3_FIFO_PRIO_PCI BIT(4)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_EOB BIT(2)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 BIT(3)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_ANY (BIT(2)|BIT(3))
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_EOB BIT(0)
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1 BIT(1)
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_ANY (BIT(0)|BIT(1))
+
+/* Local bus configuration bits */
+#define V3_LB_CFG_LB_TO_64_CYCLES 0x0000
+#define V3_LB_CFG_LB_TO_256_CYCLES BIT(13)
+#define V3_LB_CFG_LB_TO_512_CYCLES BIT(14)
+#define V3_LB_CFG_LB_TO_1024_CYCLES (BIT(13)|BIT(14))
+#define V3_LB_CFG_LB_RST BIT(12)
+#define V3_LB_CFG_LB_PPC_RDY BIT(11)
+#define V3_LB_CFG_LB_LB_INT BIT(10)
+#define V3_LB_CFG_LB_ERR_EN BIT(9)
+#define V3_LB_CFG_LB_RDY_EN BIT(8)
+#define V3_LB_CFG_LB_BE_IMODE BIT(7)
+#define V3_LB_CFG_LB_BE_OMODE BIT(6)
+#define V3_LB_CFG_LB_ENDIAN BIT(5)
+#define V3_LB_CFG_LB_PARK_EN BIT(4)
+#define V3_LB_CFG_LB_FBB_DIS BIT(2)
+
+/* ARM Integrator-specific extended control registers */
+#define INTEGRATOR_SC_PCI_OFFSET 0x18
+#define INTEGRATOR_SC_PCI_ENABLE BIT(0)
+#define INTEGRATOR_SC_PCI_INTCLR BIT(1)
+#define INTEGRATOR_SC_LBFADDR_OFFSET 0x20
+#define INTEGRATOR_SC_LBFCODE_OFFSET 0x24
+
+struct v3_pci {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *config_base;
+ struct pci_bus *bus;
+ u32 config_mem;
+ u32 io_mem;
+ u32 non_pre_mem;
+ u32 pre_mem;
+ phys_addr_t io_bus_addr;
+ phys_addr_t non_pre_bus_addr;
+ phys_addr_t pre_bus_addr;
+ struct regmap *map;
+};
+
+/*
+ * The V3 PCI interface chip in Integrator provides several windows from
+ * local bus memory into the PCI memory areas. Unfortunately, there
+ * are not really enough windows for our usage, therefore we reuse
+ * one of the windows for access to PCI configuration space. On the
+ * Integrator/AP, the memory map is as follows:
+ *
+ * Local Bus Memory Usage
+ *
+ * 40000000 - 4FFFFFFF PCI memory. 256M non-prefetchable
+ * 50000000 - 5FFFFFFF PCI memory. 256M prefetchable
+ * 60000000 - 60FFFFFF PCI IO. 16M
+ * 61000000 - 61FFFFFF PCI Configuration. 16M
+ *
+ * There are three V3 windows, each described by a pair of V3 registers.
+ * These are LB_BASE0/LB_MAP0, LB_BASE1/LB_MAP1 and LB_BASE2/LB_MAP2.
+ * Base0 and Base1 can be used for any type of PCI memory access. Base2
+ * can be used either for PCI I/O or for I2O accesses. By default, uHAL
+ * uses this only for PCI IO space.
+ *
+ * Normally these spaces are mapped using the following base registers:
+ *
+ * Usage Local Bus Memory Base/Map registers used
+ *
+ * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0
+ * Mem 50000000 - 5FFFFFFF LB_BASE1/LB_MAP1
+ * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2
+ * Cfg 61000000 - 61FFFFFF
+ *
+ * This means that I2O and PCI configuration space accesses will fail.
+ * When PCI configuration accesses are needed (via the uHAL PCI
+ * configuration space primitives) we must remap the spaces as follows:
+ *
+ * Usage Local Bus Memory Base/Map registers used
+ *
+ * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0
+ * Mem 50000000 - 5FFFFFFF LB_BASE0/LB_MAP0
+ * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2
+ * Cfg 61000000 - 61FFFFFF LB_BASE1/LB_MAP1
+ *
+ * To make this work, the code depends on overlapping windows working.
+ * The V3 chip translates an address by checking its range within
+ * each of the BASE/MAP pairs in turn (in ascending register number
+ * order). It will use the first matching pair. So, for example,
+ * if the same address is mapped by both LB_BASE0/LB_MAP0 and
+ * LB_BASE1/LB_MAP1, the V3 will use the translation from
+ * LB_BASE0/LB_MAP0.
+ *
+ * To allow PCI Configuration space access, the code enlarges the
+ * window mapped by LB_BASE0/LB_MAP0 from 256M to 512M. This occludes
+ * the windows currently mapped by LB_BASE1/LB_MAP1 so that it can
+ * be remapped for use by configuration cycles.
+ *
+ * At the end of the PCI Configuration space accesses,
+ * LB_BASE1/LB_MAP1 is reset to map PCI Memory. Finally the window
+ * mapped by LB_BASE0/LB_MAP0 is reduced in size from 512M to 256M to
+ * reveal the now restored LB_BASE1/LB_MAP1 window.
+ *
+ * NOTE: We do not set up I2O mapping. I suspect that this is only
+ * for an intelligent (target) device. Using I2O disables most of
+ * the mappings into PCI memory.
+ */
+static void __iomem *v3_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int offset)
+{
+ struct v3_pci *v3 = bus->sysdata;
+ unsigned int address, mapaddress, busnr;
+
+ busnr = bus->number;
+ if (busnr == 0) {
+ int slot = PCI_SLOT(devfn);
+
+ /*
+ * local bus segment so need a type 0 config cycle
+ *
+ * build the PCI configuration "address" with one-hot in
+ * A31-A11
+ *
+ * mapaddress:
+ * 3:1 = config cycle (101)
+ * 0 = PCI A1 & A0 are 0 (0)
+ */
+ address = PCI_FUNC(devfn) << 8;
+ mapaddress = V3_LB_MAP_TYPE_CONFIG;
+
+ if (slot > 12)
+ /*
+ * high order bits are handled by the MAP register
+ */
+ mapaddress |= BIT(slot - 5);
+ else
+ /*
+ * low order bits handled directly in the address
+ */
+ address |= BIT(slot + 11);
+ } else {
+ /*
+ * not the local bus segment so need a type 1 config cycle
+ *
+ * address:
+ * 23:16 = bus number
+ * 15:11 = slot number (7:3 of devfn)
+ * 10:8 = func number (2:0 of devfn)
+ *
+ * mapaddress:
+ * 3:1 = config cycle (101)
+ * 0 = PCI A1 & A0 from host bus (1)
+ */
+ mapaddress = V3_LB_MAP_TYPE_CONFIG | V3_LB_MAP_AD_LOW_EN;
+ address = (busnr << 16) | (devfn << 8);
+ }
+
+ /*
+ * Set up base0 to see all 512Mbytes of memory space (not
+ * prefetchable), this frees up base1 for re-use by
+ * configuration memory
+ */
+ writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+ V3_LB_BASE_ADR_SIZE_512MB | V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE0);
+
+ /*
+ * Set up base1/map1 to point into configuration space.
+ * The config mem is always 16MB.
+ */
+ writel(v3_addr_to_lb_base(v3->config_mem) |
+ V3_LB_BASE_ADR_SIZE_16MB | V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE1);
+ writew(mapaddress, v3->base + V3_LB_MAP1);
+
+ return v3->config_base + address + offset;
+}
+
+static void v3_unmap_bus(struct v3_pci *v3)
+{
+ /*
+ * Reassign base1 for use by prefetchable PCI memory
+ */
+ writel(v3_addr_to_lb_base(v3->pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_PREFETCH |
+ V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE1);
+ writew(v3_addr_to_lb_map(v3->pre_bus_addr) |
+ V3_LB_MAP_TYPE_MEM, /* was V3_LB_MAP_TYPE_MEM_MULTIPLE */
+ v3->base + V3_LB_MAP1);
+
+ /*
+ * And shrink base0 back to a 256M window (NOTE: MAP0 already correct)
+ */
+ writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE0);
+}
+
+static int v3_pci_read_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 *value)
+{
+ struct v3_pci *v3 = bus->sysdata;
+ int ret;
+
+ dev_dbg(&bus->dev,
+ "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+ PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value);
+ ret = pci_generic_config_read(bus, fn, config, size, value);
+ v3_unmap_bus(v3);
+ return ret;
+}
+
+static int v3_pci_write_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 value)
+{
+ struct v3_pci *v3 = bus->sysdata;
+ int ret;
+
+ dev_dbg(&bus->dev,
+ "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+ PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
+ ret = pci_generic_config_write(bus, fn, config, size, value);
+ v3_unmap_bus(v3);
+ return ret;
+}
+
+static struct pci_ops v3_pci_ops = {
+ .map_bus = v3_map_bus,
+ .read = v3_pci_read_config,
+ .write = v3_pci_write_config,
+};
+
+static irqreturn_t v3_irq(int irq, void *data)
+{
+ struct v3_pci *v3 = data;
+ struct device *dev = v3->dev;
+ u32 status;
+
+ status = readw(v3->base + V3_PCI_STAT);
+ if (status & V3_PCI_STAT_PAR_ERR)
+ dev_err(dev, "parity error interrupt\n");
+ if (status & V3_PCI_STAT_SYS_ERR)
+ dev_err(dev, "system error interrupt\n");
+ if (status & V3_PCI_STAT_M_ABORT_ERR)
+ dev_err(dev, "master abort error interrupt\n");
+ if (status & V3_PCI_STAT_T_ABORT_ERR)
+ dev_err(dev, "target abort error interrupt\n");
+ writew(status, v3->base + V3_PCI_STAT);
+
+ status = readb(v3->base + V3_LB_ISTAT);
+ if (status & V3_LB_ISTAT_MAILBOX)
+ dev_info(dev, "PCI mailbox interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_RD)
+ dev_err(dev, "PCI target LB->PCI READ abort interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_WR)
+ dev_err(dev, "PCI target LB->PCI WRITE abort interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_INT)
+ dev_info(dev, "PCI pin interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_PERR)
+ dev_err(dev, "PCI parity error interrupt\n");
+ if (status & V3_LB_ISTAT_I2O_QWR)
+ dev_info(dev, "I2O inbound post queue interrupt\n");
+ if (status & V3_LB_ISTAT_DMA1)
+ dev_info(dev, "DMA channel 1 interrupt\n");
+ if (status & V3_LB_ISTAT_DMA0)
+ dev_info(dev, "DMA channel 0 interrupt\n");
+ /* Clear all possible interrupts on the local bus */
+ writeb(0, v3->base + V3_LB_ISTAT);
+ if (v3->map)
+ regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET,
+ INTEGRATOR_SC_PCI_ENABLE |
+ INTEGRATOR_SC_PCI_INTCLR);
+
+ return IRQ_HANDLED;
+}
+
+static int v3_integrator_init(struct v3_pci *v3)
+{
+ unsigned int val;
+
+ v3->map =
+ syscon_regmap_lookup_by_compatible("arm,integrator-ap-syscon");
+ if (IS_ERR(v3->map)) {
+ dev_err(v3->dev, "no syscon\n");
+ return -ENODEV;
+ }
+
+ regmap_read(v3->map, INTEGRATOR_SC_PCI_OFFSET, &val);
+ /* Take the PCI bridge out of reset, clear IRQs */
+ regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET,
+ INTEGRATOR_SC_PCI_ENABLE |
+ INTEGRATOR_SC_PCI_INTCLR);
+
+ if (!(val & INTEGRATOR_SC_PCI_ENABLE)) {
+ /* If we were in reset we need to sleep a bit */
+ msleep(230);
+
+ /* Set the physical base for the controller itself */
+ writel(0x6200, v3->base + V3_LB_IO_BASE);
+
+ /* Wait for the mailbox to settle after reset */
+ do {
+ writeb(0xaa, v3->base + V3_MAIL_DATA);
+ writeb(0x55, v3->base + V3_MAIL_DATA + 4);
+ } while (readb(v3->base + V3_MAIL_DATA) != 0xaa &&
+ readb(v3->base + V3_MAIL_DATA) != 0x55);
+ }
+
+ dev_info(v3->dev, "initialized PCI V3 Integrator/AP integration\n");
+
+ return 0;
+}
+
+static int v3_pci_setup_resource(struct v3_pci *v3,
+ resource_size_t io_base,
+ struct pci_host_bridge *host,
+ struct resource_entry *win)
+{
+ struct device *dev = v3->dev;
+ struct resource *mem;
+ struct resource *io;
+ int ret;
+
+ switch (resource_type(win->res)) {
+ case IORESOURCE_IO:
+ io = win->res;
+ io->name = "V3 PCI I/O";
+ v3->io_mem = io_base;
+ v3->io_bus_addr = io->start - win->offset;
+ dev_dbg(dev, "I/O window %pR, bus addr %pap\n",
+ io, &v3->io_bus_addr);
+ ret = pci_remap_iospace(io, io_base);
+ if (ret) {
+ dev_warn(dev,
+ "error %d: failed to map resource %pR\n",
+ ret, io);
+ return ret;
+ }
+ /* Setup window 2 - PCI I/O */
+ writel(v3_addr_to_lb_base2(v3->io_mem) |
+ V3_LB_BASE2_ENABLE,
+ v3->base + V3_LB_BASE2);
+ writew(v3_addr_to_lb_map2(v3->io_bus_addr),
+ v3->base + V3_LB_MAP2);
+ break;
+ case IORESOURCE_MEM:
+ mem = win->res;
+ if (mem->flags & IORESOURCE_PREFETCH) {
+ mem->name = "V3 PCI PRE-MEM";
+ v3->pre_mem = mem->start;
+ v3->pre_bus_addr = mem->start - win->offset;
+ dev_dbg(dev, "PREFETCHABLE MEM window %pR, bus addr %pap\n",
+ mem, &v3->pre_bus_addr);
+ if (resource_size(mem) != SZ_256M) {
+ dev_err(dev, "prefetchable memory range is not 256MB\n");
+ return -EINVAL;
+ }
+ if (v3->non_pre_mem &&
+ (mem->start != v3->non_pre_mem + SZ_256M)) {
+ dev_err(dev,
+ "prefetchable memory is not adjacent to non-prefetchable memory\n");
+ return -EINVAL;
+ }
+ /* Setup window 1 - PCI prefetchable memory */
+ writel(v3_addr_to_lb_base(v3->pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB |
+ V3_LB_BASE_PREFETCH |
+ V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE1);
+ writew(v3_addr_to_lb_map(v3->pre_bus_addr) |
+ V3_LB_MAP_TYPE_MEM, /* Was V3_LB_MAP_TYPE_MEM_MULTIPLE */
+ v3->base + V3_LB_MAP1);
+ } else {
+ mem->name = "V3 PCI NON-PRE-MEM";
+ v3->non_pre_mem = mem->start;
+ v3->non_pre_bus_addr = mem->start - win->offset;
+ dev_dbg(dev, "NON-PREFETCHABLE MEM window %pR, bus addr %pap\n",
+ mem, &v3->non_pre_bus_addr);
+ if (resource_size(mem) != SZ_256M) {
+ dev_err(dev,
+ "non-prefetchable memory range is not 256MB\n");
+ return -EINVAL;
+ }
+ /* Setup window 0 - PCI non-prefetchable memory */
+ writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB |
+ V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE0);
+ writew(v3_addr_to_lb_map(v3->non_pre_bus_addr) |
+ V3_LB_MAP_TYPE_MEM,
+ v3->base + V3_LB_MAP0);
+ }
+ break;
+ case IORESOURCE_BUS:
+ dev_dbg(dev, "BUS %pR\n", win->res);
+ host->busnr = win->res->start;
+ break;
+ default:
+ dev_info(dev, "Unknown resource type %lu\n",
+ resource_type(win->res));
+ break;
+ }
+
+ return 0;
+}
+
+static int v3_get_dma_range_config(struct v3_pci *v3,
+ struct of_pci_range *range,
+ u32 *pci_base, u32 *pci_map)
+{
+ struct device *dev = v3->dev;
+ u64 cpu_end = range->cpu_addr + range->size - 1;
+ u64 pci_end = range->pci_addr + range->size - 1;
+ u32 val;
+
+ if (range->pci_addr & ~V3_PCI_BASE_M_ADR_BASE) {
+ dev_err(dev, "illegal range, only PCI bits 31..20 allowed\n");
+ return -EINVAL;
+ }
+ val = ((u32)range->pci_addr) & V3_PCI_BASE_M_ADR_BASE;
+ *pci_base = val;
+
+ if (range->cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) {
+ dev_err(dev, "illegal range, only CPU bits 31..20 allowed\n");
+ return -EINVAL;
+ }
+ val = ((u32)range->cpu_addr) & V3_PCI_MAP_M_MAP_ADR;
+
+ switch (range->size) {
+ case SZ_1M:
+ val |= V3_LB_BASE_ADR_SIZE_1MB;
+ break;
+ case SZ_2M:
+ val |= V3_LB_BASE_ADR_SIZE_2MB;
+ break;
+ case SZ_4M:
+ val |= V3_LB_BASE_ADR_SIZE_4MB;
+ break;
+ case SZ_8M:
+ val |= V3_LB_BASE_ADR_SIZE_8MB;
+ break;
+ case SZ_16M:
+ val |= V3_LB_BASE_ADR_SIZE_16MB;
+ break;
+ case SZ_32M:
+ val |= V3_LB_BASE_ADR_SIZE_32MB;
+ break;
+ case SZ_64M:
+ val |= V3_LB_BASE_ADR_SIZE_64MB;
+ break;
+ case SZ_128M:
+ val |= V3_LB_BASE_ADR_SIZE_128MB;
+ break;
+ case SZ_256M:
+ val |= V3_LB_BASE_ADR_SIZE_256MB;
+ break;
+ case SZ_512M:
+ val |= V3_LB_BASE_ADR_SIZE_512MB;
+ break;
+ case SZ_1G:
+ val |= V3_LB_BASE_ADR_SIZE_1GB;
+ break;
+ case SZ_2G:
+ val |= V3_LB_BASE_ADR_SIZE_2GB;
+ break;
+ default:
+ dev_err(v3->dev, "illegal dma memory chunk size\n");
+ return -EINVAL;
+	}
+ val |= V3_PCI_MAP_M_REG_EN | V3_PCI_MAP_M_ENABLE;
+ *pci_map = val;
+
+ dev_dbg(dev,
+ "DMA MEM CPU: 0x%016llx -> 0x%016llx => "
+ "PCI: 0x%016llx -> 0x%016llx base %08x map %08x\n",
+ range->cpu_addr, cpu_end,
+ range->pci_addr, pci_end,
+ *pci_base, *pci_map);
+
+ return 0;
+}
+
+static int v3_pci_parse_map_dma_ranges(struct v3_pci *v3,
+ struct device_node *np)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ struct device *dev = v3->dev;
+ int i = 0;
+
+ if (of_pci_dma_range_parser_init(&parser, np)) {
+ dev_err(dev, "missing dma-ranges property\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Get the dma-ranges from the device tree
+ */
+ for_each_of_pci_range(&parser, &range) {
+ int ret;
+ u32 pci_base, pci_map;
+
+ ret = v3_get_dma_range_config(v3, &range, &pci_base, &pci_map);
+ if (ret)
+ return ret;
+
+ if (i == 0) {
+ writel(pci_base, v3->base + V3_PCI_BASE0);
+ writel(pci_map, v3->base + V3_PCI_MAP0);
+ } else if (i == 1) {
+ writel(pci_base, v3->base + V3_PCI_BASE1);
+ writel(pci_map, v3->base + V3_PCI_MAP1);
+ } else {
+ dev_err(dev, "too many ranges, only two supported\n");
+ dev_err(dev, "range %d ignored\n", i);
+ }
+ i++;
+ }
+ return 0;
+}
+
+static int v3_pci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ resource_size_t io_base;
+ struct resource *regs;
+ struct resource_entry *win;
+ struct v3_pci *v3;
+ struct pci_host_bridge *host;
+ struct clk *clk;
+ u16 val;
+ int irq;
+ int ret;
+ LIST_HEAD(res);
+
+ host = pci_alloc_host_bridge(sizeof(*v3));
+ if (!host)
+ return -ENOMEM;
+
+ host->dev.parent = dev;
+ host->ops = &v3_pci_ops;
+ host->busnr = 0;
+ host->msi = NULL;
+ host->map_irq = of_irq_parse_and_map_pci;
+ host->swizzle_irq = pci_common_swizzle;
+ v3 = pci_host_bridge_priv(host);
+ host->sysdata = v3;
+ v3->dev = dev;
+
+ /* Get and enable host clock */
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "clock not found\n");
+ return PTR_ERR(clk);
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "unable to enable clock\n");
+ return ret;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ v3->base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(v3->base))
+ return PTR_ERR(v3->base);
+ /*
+ * The hardware has a register with the physical base address
+ * of the V3 controller itself; verify that this is the same
+ * as the physical memory we've remapped it from.
+ */
+ if (readl(v3->base + V3_LB_IO_BASE) != (regs->start >> 16))
+ dev_err(dev, "V3_LB_IO_BASE = %08x but device is @%pR\n",
+ readl(v3->base + V3_LB_IO_BASE), regs);
+
+ /* Configuration space is 16MB directly mapped */
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (resource_size(regs) != SZ_16M) {
+ dev_err(dev, "config mem is not 16MB!\n");
+ return -EINVAL;
+ }
+ v3->config_mem = regs->start;
+ v3->config_base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(v3->config_base))
+ return PTR_ERR(v3->config_base);
+
+ ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &io_base);
+ if (ret)
+ return ret;
+
+ ret = devm_request_pci_bus_resources(dev, &res);
+ if (ret)
+ return ret;
+
+ /* Get and request error IRQ resource */
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "unable to obtain PCIv3 error IRQ\n");
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, irq, v3_irq, 0,
+ "PCIv3 error", v3);
+ if (ret < 0) {
+ dev_err(dev,
+ "unable to request PCIv3 error IRQ %d (%d)\n",
+ irq, ret);
+ return ret;
+ }
+
+ /*
+ * Unlock V3 registers, but only if they were previously locked.
+ */
+ if (readw(v3->base + V3_SYSTEM) & V3_SYSTEM_M_LOCK)
+ writew(V3_SYSTEM_UNLOCK, v3->base + V3_SYSTEM);
+
+ /* Disable all slave access while we set up the windows */
+ val = readw(v3->base + V3_PCI_CMD);
+ val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ writew(val, v3->base + V3_PCI_CMD);
+
+ /* Put the PCI bus into reset */
+ val = readw(v3->base + V3_SYSTEM);
+ val &= ~V3_SYSTEM_M_RST_OUT;
+ writew(val, v3->base + V3_SYSTEM);
+
+ /* Retry until we're ready */
+ val = readw(v3->base + V3_PCI_CFG);
+ val |= V3_PCI_CFG_M_RETRY_EN;
+ writew(val, v3->base + V3_PCI_CFG);
+
+ /* Set up the local bus protocol */
+ val = readw(v3->base + V3_LB_CFG);
+ val |= V3_LB_CFG_LB_BE_IMODE; /* Byte enable input */
+ val |= V3_LB_CFG_LB_BE_OMODE; /* Byte enable output */
+ val &= ~V3_LB_CFG_LB_ENDIAN; /* Little endian */
+ val &= ~V3_LB_CFG_LB_PPC_RDY; /* TODO: when using on PPC403Gx, set to 1 */
+ writew(val, v3->base + V3_LB_CFG);
+
+ /* Enable the PCI bus master */
+ val = readw(v3->base + V3_PCI_CMD);
+ val |= PCI_COMMAND_MASTER;
+ writew(val, v3->base + V3_PCI_CMD);
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry(win, &res) {
+ ret = v3_pci_setup_resource(v3, io_base, host, win);
+ if (ret) {
+ dev_err(dev, "error setting up resources\n");
+ return ret;
+ }
+ }
+ ret = v3_pci_parse_map_dma_ranges(v3, np);
+ if (ret)
+ return ret;
+
+ /*
+ * Disable PCI to host IO cycles, enable I/O buffers @3.3V,
+ * set AD_LOW0 to 1 if one of the LB_MAP registers chooses
+ * to use this (should be unused).
+ */
+ writel(0x00000000, v3->base + V3_PCI_IO_BASE);
+ val = V3_PCI_CFG_M_IO_REG_DIS | V3_PCI_CFG_M_IO_DIS |
+ V3_PCI_CFG_M_EN3V | V3_PCI_CFG_M_AD_LOW0;
+ /*
+ * DMA read and write command types from the PCI bus
+ */
+ val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_RTYPE_SHIFT;
+ val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_WTYPE_SHIFT;
+ writew(val, v3->base + V3_PCI_CFG);
+
+ /*
+ * Set the V3 FIFO such that writes have higher priority than
+ * reads, and a local bus write causes a local bus read FIFO flush
+ * on aperture 1. Same for PCI.
+ */
+ writew(V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 |
+ V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 |
+ V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 |
+ V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1,
+ v3->base + V3_FIFO_PRIORITY);
+
+ /*
+ * Clear any error interrupts, and enable parity and write error
+ * interrupts
+ */
+ writeb(0, v3->base + V3_LB_ISTAT);
+ val = readw(v3->base + V3_LB_CFG);
+ val |= V3_LB_CFG_LB_LB_INT;
+ writew(val, v3->base + V3_LB_CFG);
+ writeb(V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR,
+ v3->base + V3_LB_IMASK);
+
+ /* Special Integrator initialization */
+ if (of_device_is_compatible(np, "arm,integrator-ap-pci")) {
+ ret = v3_integrator_init(v3);
+ if (ret)
+ return ret;
+ }
+
+ /* Post-init: enable PCI memory and invalidate (master already on) */
+ val = readw(v3->base + V3_PCI_CMD);
+ val |= PCI_COMMAND_MEMORY | PCI_COMMAND_INVALIDATE;
+ writew(val, v3->base + V3_PCI_CMD);
+
+ /* Clear pending interrupts */
+ writeb(0, v3->base + V3_LB_ISTAT);
+ /* Read or write errors and parity errors cause interrupts */
+ writeb(V3_LB_ISTAT_PCI_RD | V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR,
+ v3->base + V3_LB_IMASK);
+
+ /* Take the PCI bus out of reset so devices can initialize */
+ val = readw(v3->base + V3_SYSTEM);
+ val |= V3_SYSTEM_M_RST_OUT;
+ writew(val, v3->base + V3_SYSTEM);
+
+ /*
+ * Re-lock the system register.
+ */
+ val = readw(v3->base + V3_SYSTEM);
+ val |= V3_SYSTEM_M_LOCK;
+ writew(val, v3->base + V3_SYSTEM);
+
+ list_splice_init(&res, &host->windows);
+ ret = pci_scan_root_bus_bridge(host);
+ if (ret) {
+ dev_err(dev, "failed to register host: %d\n", ret);
+ return ret;
+ }
+ v3->bus = host->bus;
+
+ pci_bus_assign_resources(v3->bus);
+ pci_bus_add_devices(v3->bus);
+
+ return 0;
+}
+
+static const struct of_device_id v3_pci_of_match[] = {
+ {
+ .compatible = "v3,v360epc-pci",
+ },
+ {},
+};
+
+static struct platform_driver v3_pci_driver = {
+ .driver = {
+ .name = "pci-v3-semi",
+ .of_match_table = of_match_ptr(v3_pci_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = v3_pci_probe,
+};
+builtin_platform_driver(v3_pci_driver);
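The driver above, like the xgene, iproc and rcar conversions later in this series, parses "dma-ranges" with the shared OF helper instead of carrying a private copy of the parser init. A minimal sketch of that pattern, assuming a hypothetical example_pcie driver structure (the inbound window programming is elided):

    #include <linux/device.h>
    #include <linux/of.h>
    #include <linux/of_address.h>

    struct example_pcie {
            struct device *dev;     /* hypothetical driver state */
    };

    static int example_parse_dma_ranges(struct example_pcie *pcie,
                                        struct device_node *np)
    {
            struct of_pci_range_parser parser;
            struct of_pci_range range;

            /* Shared helper replaces the per-driver pci_dma_range_parser_init() */
            if (of_pci_dma_range_parser_init(&parser, np))
                    return -EINVAL;

            for_each_of_pci_range(&parser, &range) {
                    /* One inbound window per parsed range would be programmed here */
                    dev_dbg(pcie->dev, "CPU 0x%llx -> PCI 0x%llx, size 0x%llx\n",
                            range.cpu_addr, range.pci_addr, range.size);
            }

            return 0;
    }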
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 087645116ecb..465aa2a1b38d 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -542,24 +542,6 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
}
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
- parser->end = parser->range + rlen / sizeof(__be32);
-
- return 0;
-}
-
static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
{
struct device_node *np = port->node;
@@ -568,7 +550,7 @@ static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
struct device *dev = port->dev;
u8 ib_reg_mask = 0;
- if (pci_dma_range_parser_init(&parser, np)) {
+ if (of_pci_dma_range_parser_init(&parser, np)) {
dev_err(dev, "missing dma-ranges property\n");
return -EINVAL;
}
@@ -628,7 +610,7 @@ static struct pci_ops xgene_pcie_ops = {
.write = pci_generic_config_write32,
};
-static int xgene_pcie_probe_bridge(struct platform_device *pdev)
+static int xgene_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
@@ -709,7 +691,7 @@ static struct platform_driver xgene_pcie_driver = {
.of_match_table = of_match_ptr(xgene_pcie_match_table),
.suppress_bind_attrs = true,
},
- .probe = xgene_pcie_probe_bridge,
+ .probe = xgene_pcie_probe,
};
builtin_platform_driver(xgene_pcie_driver);
#endif
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index b468b8cccf8d..5cc4f594d79a 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -105,7 +105,7 @@ static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
return readl_relaxed(pcie->cra_base + reg);
}
-static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
+static bool altera_pcie_link_up(struct altera_pcie *pcie)
{
return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
}
@@ -142,7 +142,7 @@ static bool altera_pcie_valid_device(struct altera_pcie *pcie,
{
/* If there is no link, then there is no device */
if (bus->number != pcie->root_bus_nr) {
- if (!altera_pcie_link_is_up(pcie))
+ if (!altera_pcie_link_up(pcie))
return false;
}
@@ -412,7 +412,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie)
 /* Wait for the link to come up */
start_jiffies = jiffies;
for (;;) {
- if (altera_pcie_link_is_up(pcie))
+ if (altera_pcie_link_up(pcie))
break;
if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
@@ -427,7 +427,7 @@ static void altera_pcie_retrain(struct altera_pcie *pcie)
{
u16 linkcap, linkstat, linkctl;
- if (!altera_pcie_link_is_up(pcie))
+ if (!altera_pcie_link_up(pcie))
return;
/*
diff --git a/drivers/pci/host/pcie-iproc-msi.c b/drivers/pci/host/pcie-iproc-msi.c
index 2d0f535a2f69..990fc906d73d 100644
--- a/drivers/pci/host/pcie-iproc-msi.c
+++ b/drivers/pci/host/pcie-iproc-msi.c
@@ -179,7 +179,7 @@ static struct irq_chip iproc_msi_irq_chip = {
static struct msi_domain_info iproc_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX,
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
.chip = &iproc_msi_irq_chip,
};
@@ -237,7 +237,7 @@ static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
msg->address_lo = lower_32_bits(addr);
msg->address_hi = upper_32_bits(addr);
- msg->data = data->hwirq;
+ msg->data = data->hwirq << 5;
}
static struct irq_chip iproc_msi_bottom_irq_chip = {
@@ -251,7 +251,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
void *args)
{
struct iproc_msi *msi = domain->host_data;
- int hwirq;
+ int hwirq, i;
mutex_lock(&msi->bitmap_lock);
@@ -267,10 +267,14 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
mutex_unlock(&msi->bitmap_lock);
- irq_domain_set_info(domain, virq, hwirq, &iproc_msi_bottom_irq_chip,
- domain->host_data, handle_simple_irq, NULL, NULL);
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &iproc_msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
- return 0;
+ return hwirq;
}
static void iproc_msi_irq_domain_free(struct irq_domain *domain,
@@ -302,7 +306,8 @@ static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
msg = (u32 *)(msi->eq_cpu + offs);
- hwirq = *msg & IPROC_MSI_EQ_MASK;
+ hwirq = readl(msg);
+ hwirq = (hwirq >> 5) + (hwirq & 0x1f);
/*
* Since we have multiple hwirq mapped to a single MSI vector,
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 3a8b9d20ee57..935909bbe5c4 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -1097,24 +1097,6 @@ err_ib:
return ret;
}
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
-
- parser->end = parser->range + rlen / sizeof(__be32);
- return 0;
-}
-
static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
{
struct of_pci_range range;
@@ -1122,7 +1104,7 @@ static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
int ret;
/* Get the dma-ranges from DT */
- ret = pci_dma_range_parser_init(&parser, pcie->dev->of_node);
+ ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node);
if (ret)
return ret;
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 4e0b25d09b0c..12796eccb2be 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -1027,24 +1027,6 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
return 0;
}
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
-
- parser->end = parser->range + rlen / sizeof(__be32);
- return 0;
-}
-
static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
struct device_node *np)
{
@@ -1053,7 +1035,7 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
int index = 0;
int err;
- if (pci_dma_range_parser_init(&parser, np))
+ if (of_pci_dma_range_parser_init(&parser, np))
return -EINVAL;
/* Get the dma-ranges from DT */
diff --git a/drivers/pci/host/pcie-tango.c b/drivers/pci/host/pcie-tango.c
index 95049de4131c..21a208da3f59 100644
--- a/drivers/pci/host/pcie-tango.c
+++ b/drivers/pci/host/pcie-tango.c
@@ -1,13 +1,173 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
#include <linux/pci-ecam.h>
#include <linux/delay.h>
-#include <linux/of.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+
+#define MSI_MAX 256
#define SMP8759_MUX 0x48
#define SMP8759_TEST_OUT 0x74
+#define SMP8759_DOORBELL 0x7c
+#define SMP8759_STATUS 0x80
+#define SMP8759_ENABLE 0xa0
struct tango_pcie {
- void __iomem *base;
+ DECLARE_BITMAP(used_msi, MSI_MAX);
+ u64 msi_doorbell;
+ spinlock_t used_msi_lock;
+ void __iomem *base;
+ struct irq_domain *dom;
+};
+
+static void tango_msi_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct tango_pcie *pcie = irq_desc_get_handler_data(desc);
+ unsigned long status, base, virq, idx, pos = 0;
+
+ chained_irq_enter(chip, desc);
+ spin_lock(&pcie->used_msi_lock);
+
+ while ((pos = find_next_bit(pcie->used_msi, MSI_MAX, pos)) < MSI_MAX) {
+ base = round_down(pos, 32);
+ status = readl_relaxed(pcie->base + SMP8759_STATUS + base / 8);
+ for_each_set_bit(idx, &status, 32) {
+ virq = irq_find_mapping(pcie->dom, base + idx);
+ generic_handle_irq(virq);
+ }
+ pos = base + 32;
+ }
+
+ spin_unlock(&pcie->used_msi_lock);
+ chained_irq_exit(chip, desc);
+}
+
+static void tango_ack(struct irq_data *d)
+{
+ struct tango_pcie *pcie = d->chip_data;
+ u32 offset = (d->hwirq / 32) * 4;
+ u32 bit = BIT(d->hwirq % 32);
+
+ writel_relaxed(bit, pcie->base + SMP8759_STATUS + offset);
+}
+
+static void update_msi_enable(struct irq_data *d, bool unmask)
+{
+ unsigned long flags;
+ struct tango_pcie *pcie = d->chip_data;
+ u32 offset = (d->hwirq / 32) * 4;
+ u32 bit = BIT(d->hwirq % 32);
+ u32 val;
+
+ spin_lock_irqsave(&pcie->used_msi_lock, flags);
+ val = readl_relaxed(pcie->base + SMP8759_ENABLE + offset);
+ val = unmask ? val | bit : val & ~bit;
+ writel_relaxed(val, pcie->base + SMP8759_ENABLE + offset);
+ spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+}
+
+static void tango_mask(struct irq_data *d)
+{
+ update_msi_enable(d, false);
+}
+
+static void tango_unmask(struct irq_data *d)
+{
+ update_msi_enable(d, true);
+}
+
+static int tango_set_affinity(struct irq_data *d, const struct cpumask *mask,
+ bool force)
+{
+ return -EINVAL;
+}
+
+static void tango_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct tango_pcie *pcie = d->chip_data;
+ msg->address_lo = lower_32_bits(pcie->msi_doorbell);
+ msg->address_hi = upper_32_bits(pcie->msi_doorbell);
+ msg->data = d->hwirq;
+}
+
+static struct irq_chip tango_chip = {
+ .irq_ack = tango_ack,
+ .irq_mask = tango_mask,
+ .irq_unmask = tango_unmask,
+ .irq_set_affinity = tango_set_affinity,
+ .irq_compose_msi_msg = tango_compose_msi_msg,
+};
+
+static void msi_ack(struct irq_data *d)
+{
+ irq_chip_ack_parent(d);
+}
+
+static void msi_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void msi_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip msi_chip = {
+ .name = "MSI",
+ .irq_ack = msi_ack,
+ .irq_mask = msi_mask,
+ .irq_unmask = msi_unmask,
+};
+
+static struct msi_domain_info msi_dom_info = {
+ .flags = MSI_FLAG_PCI_MSIX
+ | MSI_FLAG_USE_DEF_DOM_OPS
+ | MSI_FLAG_USE_DEF_CHIP_OPS,
+ .chip = &msi_chip,
+};
+
+static int tango_irq_domain_alloc(struct irq_domain *dom, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct tango_pcie *pcie = dom->host_data;
+ unsigned long flags;
+ int pos;
+
+ spin_lock_irqsave(&pcie->used_msi_lock, flags);
+ pos = find_first_zero_bit(pcie->used_msi, MSI_MAX);
+ if (pos >= MSI_MAX) {
+ spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+ return -ENOSPC;
+ }
+ __set_bit(pos, pcie->used_msi);
+ spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+ irq_domain_set_info(dom, virq, pos, &tango_chip,
+ pcie, handle_edge_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void tango_irq_domain_free(struct irq_domain *dom, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ unsigned long flags;
+ struct irq_data *d = irq_domain_get_irq_data(dom, virq);
+ struct tango_pcie *pcie = d->chip_data;
+
+ spin_lock_irqsave(&pcie->used_msi_lock, flags);
+ __clear_bit(d->hwirq, pcie->used_msi);
+ spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+}
+
+static const struct irq_domain_ops dom_ops = {
+ .alloc = tango_irq_domain_alloc,
+ .free = tango_irq_domain_free,
};
static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn,
@@ -77,7 +237,11 @@ static int tango_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct tango_pcie *pcie;
struct resource *res;
- int ret;
+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct irq_domain *msi_dom, *irq_dom;
+ struct of_pci_range_parser parser;
+ struct of_pci_range range;
+ int virq, offset;
dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n");
add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
@@ -96,6 +260,41 @@ static int tango_pcie_probe(struct platform_device *pdev)
if (!tango_pcie_link_up(pcie))
return -ENODEV;
+ if (of_pci_dma_range_parser_init(&parser, dev->of_node) < 0)
+ return -ENOENT;
+
+ if (of_pci_range_parser_one(&parser, &range) == NULL)
+ return -ENOENT;
+
+ range.pci_addr += range.size;
+ pcie->msi_doorbell = range.pci_addr + res->start + SMP8759_DOORBELL;
+
+ for (offset = 0; offset < MSI_MAX / 8; offset += 4)
+ writel_relaxed(0, pcie->base + SMP8759_ENABLE + offset);
+
+ virq = platform_get_irq(pdev, 1);
+ if (virq <= 0) {
+ dev_err(dev, "Failed to map IRQ\n");
+ return -ENXIO;
+ }
+
+ irq_dom = irq_domain_create_linear(fwnode, MSI_MAX, &dom_ops, pcie);
+ if (!irq_dom) {
+ dev_err(dev, "Failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi_dom = pci_msi_create_irq_domain(fwnode, &msi_dom_info, irq_dom);
+ if (!msi_dom) {
+ dev_err(dev, "Failed to create MSI domain\n");
+ irq_domain_remove(irq_dom);
+ return -ENOMEM;
+ }
+
+ pcie->dom = irq_dom;
+ spin_lock_init(&pcie->used_msi_lock);
+ irq_set_chained_handler_and_data(virq, tango_msi_isr, pcie);
+
return pci_host_common_probe(pdev, &smp8759_ecam_ops);
}
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 94e13cb8608f..7b5325990f5e 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -129,7 +129,7 @@ static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
writel(val, port->reg_base + reg);
}
-static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
+static inline bool xilinx_pcie_link_up(struct xilinx_pcie_port *port)
{
return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
@@ -165,7 +165,7 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
/* Check if link is up when trying to access downstream ports */
if (bus->number != port->root_busno)
- if (!xilinx_pcie_link_is_up(port))
+ if (!xilinx_pcie_link_up(port))
return false;
/* Only one device down on each root port */
@@ -541,7 +541,7 @@ static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
{
struct device *dev = port->dev;
- if (xilinx_pcie_link_is_up(port))
+ if (xilinx_pcie_link_up(port))
dev_info(dev, "PCIe Link is UP\n");
else
dev_info(dev, "PCIe Link is DOWN\n");
diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c
deleted file mode 100644
index c68366cee6b7..000000000000
--- a/drivers/pci/hotplug-pci.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Core PCI functionality used only by PCI hotplug */
-
-#include <linux/pci.h>
-#include <linux/export.h>
-#include "pci.h"
-
-int pci_hp_add_bridge(struct pci_dev *dev)
-{
- struct pci_bus *parent = dev->bus;
- int pass, busnr, start = parent->busn_res.start;
- int end = parent->busn_res.end;
-
- for (busnr = start; busnr <= end; busnr++) {
- if (!pci_find_bus(pci_domain_nr(parent), busnr))
- break;
- }
- if (busnr-- > end) {
- printk(KERN_ERR "No bus number available for hot-added bridge %s\n",
- pci_name(dev));
- return -1;
- }
- for (pass = 0; pass < 2; pass++)
- busnr = pci_scan_bridge(parent, dev, busnr, pass);
- if (!dev->subordinate)
- return -1;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pci_hp_add_bridge);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 5ed2dcaa8e27..5db6f1839dad 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -462,18 +462,15 @@ static void enable_slot(struct acpiphp_slot *slot)
acpiphp_rescan_slot(slot);
max = acpiphp_max_busnr(bus);
for (pass = 0; pass < 2; pass++) {
- list_for_each_entry(dev, &bus->devices, bus_list) {
+ for_each_pci_bridge(dev, bus) {
if (PCI_SLOT(dev->devfn) != slot->device)
continue;
- if (pci_is_bridge(dev)) {
- max = pci_scan_bridge(bus, dev, max, pass);
- if (pass && dev->subordinate) {
- check_hotplug_bridge(slot, dev);
- pcibios_resource_survey_bus(dev->subordinate);
- __pci_bus_size_bridges(dev->subordinate,
- &add_list);
- }
+ max = pci_scan_bridge(bus, dev, max, pass);
+ if (pass && dev->subordinate) {
+ check_hotplug_bridge(slot, dev);
+ pcibios_resource_survey_bus(dev->subordinate);
+ __pci_bus_size_bridges(dev->subordinate, &add_list);
}
}
}
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 80c80017197d..f616358fa938 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -286,14 +286,11 @@ int cpci_configure_slot(struct slot *slot)
}
parent = slot->dev->bus;
- list_for_each_entry(dev, &parent->devices, bus_list) {
- if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
- continue;
- if (pci_is_bridge(dev))
+ for_each_pci_bridge(dev, parent) {
+ if (PCI_SLOT(dev->devfn) == PCI_SLOT(slot->devfn))
pci_hp_add_bridge(dev);
}
-
pci_assign_unassigned_bridge_resources(parent->self);
pci_bus_add_devices(parent);
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 48c8a066a6b7..c2bbe6b65d06 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -410,7 +410,7 @@ void cpqhp_create_debugfs_files(struct controller *ctrl);
void cpqhp_remove_debugfs_files(struct controller *ctrl);
/* controller functions */
-void cpqhp_pushbutton_thread(unsigned long event_pointer);
+void cpqhp_pushbutton_thread(struct timer_list *t);
irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
int cpqhp_find_available_resources(struct controller *ctrl,
void __iomem *rom_start);
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 4d06b8461255..70967ac75265 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -661,9 +661,8 @@ static int ctrl_slot_setup(struct controller *ctrl,
slot->p_sm_slot = slot_entry;
- init_timer(&slot->task_event);
+ timer_setup(&slot->task_event, cpqhp_pushbutton_thread, 0);
slot->task_event.expires = jiffies + 5 * HZ;
- slot->task_event.function = cpqhp_pushbutton_thread;
/*FIXME: these capabilities aren't used but if they are
* they need to be correctly implemented
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index a55653b54eed..a93069e739cb 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -47,7 +47,7 @@ static void interrupt_event_handler(struct controller *ctrl);
static struct task_struct *cpqhp_event_thread;
-static unsigned long pushbutton_pending; /* = 0 */
+static struct timer_list *pushbutton_pending; /* = NULL */
/* delay is in jiffies to wait for */
static void long_delay(int delay)
@@ -1732,9 +1732,10 @@ static u32 remove_board(struct pci_func *func, u32 replace_flag, struct controll
return 0;
}
-static void pushbutton_helper_thread(unsigned long data)
+static void pushbutton_helper_thread(struct timer_list *t)
{
- pushbutton_pending = data;
+ pushbutton_pending = t;
+
wake_up_process(cpqhp_event_thread);
}
@@ -1883,13 +1884,13 @@ static void interrupt_event_handler(struct controller *ctrl)
wait_for_ctrl_irq(ctrl);
mutex_unlock(&ctrl->crit_sect);
- init_timer(&p_slot->task_event);
+ timer_setup(&p_slot->task_event,
+ pushbutton_helper_thread,
+ 0);
p_slot->hp_slot = hp_slot;
p_slot->ctrl = ctrl;
/* p_slot->physical_slot = physical_slot; */
p_slot->task_event.expires = jiffies + 5 * HZ; /* 5 second delay */
- p_slot->task_event.function = pushbutton_helper_thread;
- p_slot->task_event.data = (u32) p_slot;
dbg("add_timer p_slot = %p\n", p_slot);
add_timer(&p_slot->task_event);
@@ -1920,15 +1921,15 @@ static void interrupt_event_handler(struct controller *ctrl)
* Scheduled procedure to handle blocking stuff for the pushbuttons.
* Handles all pending events and exits.
*/
-void cpqhp_pushbutton_thread(unsigned long slot)
+void cpqhp_pushbutton_thread(struct timer_list *t)
{
u8 hp_slot;
u8 device;
struct pci_func *func;
- struct slot *p_slot = (struct slot *) slot;
+ struct slot *p_slot = from_timer(p_slot, t, task_event);
struct controller *ctrl = (struct controller *) p_slot->ctrl;
- pushbutton_pending = 0;
+ pushbutton_pending = NULL;
hp_slot = p_slot->hp_slot;
device = p_slot->device;
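The cpqphp changes above, and the pciehp/shpchp changes further down, are all instances of the same timer API conversion: the callback now takes the timer_list pointer and recovers its container with from_timer(), so the manual .function/.data assignments disappear. A minimal sketch of the new shape, using a hypothetical my_ctrl container:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    struct my_ctrl {                        /* hypothetical container */
            struct timer_list poll_timer;
    };

    static void my_poll_timeout(struct timer_list *t)
    {
            /* Recover the enclosing structure from the embedded timer */
            struct my_ctrl *ctrl = from_timer(ctrl, t, poll_timer);

            /* ... poll the hardware using ctrl ... */
            mod_timer(&ctrl->poll_timer, jiffies + 2 * HZ);
    }

    static void my_start_polling(struct my_ctrl *ctrl)
    {
            /* Replaces init_timer() plus manual .function/.data setup */
            timer_setup(&ctrl->poll_timer, my_poll_timeout, 0);
            ctrl->poll_timer.expires = jiffies + 10 * HZ;
            add_timer(&ctrl->poll_timer);
    }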
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index dc1876feb06f..25edd0b18b75 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -1267,20 +1267,19 @@ static int unconfigure_boot_device(u8 busno, u8 device, u8 function)
size = size & 0xFFFFFFFC;
size = ~size + 1;
end_address = start_address + size - 1;
- if (ibmphp_find_resource(bus, start_address, &io, IO) < 0) {
- err("cannot find corresponding IO resource to remove\n");
- return -EIO;
- }
+ if (ibmphp_find_resource(bus, start_address, &io, IO))
+ goto report_search_failure;
+
debug("io->start = %x\n", io->start);
temp_end = io->end;
start_address = io->end + 1;
ibmphp_remove_resource(io);
/* This is needed b/c of the old I/O restrictions in the BIOS */
while (temp_end < end_address) {
- if (ibmphp_find_resource(bus, start_address, &io, IO) < 0) {
- err("cannot find corresponding IO resource to remove\n");
- return -EIO;
- }
+ if (ibmphp_find_resource(bus, start_address,
+ &io, IO))
+ goto report_search_failure;
+
debug("io->start = %x\n", io->start);
temp_end = io->end;
start_address = io->end + 1;
@@ -1327,6 +1326,10 @@ static int unconfigure_boot_device(u8 busno, u8 device, u8 function)
} /* end of for */
return 0;
+
+report_search_failure:
+ err("cannot find corresponding IO resource to remove\n");
+ return -EIO;
}
static int unconfigure_boot_bridge(u8 busno, u8 device, u8 function)
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index ec0b4c11ccd9..83f3d4af3677 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -113,10 +113,11 @@ static int board_added(struct slot *p_slot)
retval = pciehp_configure_device(p_slot);
if (retval) {
- ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
- pci_domain_nr(parent), parent->number);
- if (retval != -EEXIST)
+ if (retval != -EEXIST) {
+ ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
+ pci_domain_nr(parent), parent->number);
goto err_exit;
+ }
}
pciehp_green_led_on(p_slot);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index e5d5ce9e3010..7bab0606f1a9 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -50,14 +50,13 @@ static irqreturn_t pcie_isr(int irq, void *dev_id);
static void start_int_poll_timer(struct controller *ctrl, int sec);
/* This is the interrupt polling timeout function. */
-static void int_poll_timeout(unsigned long data)
+static void int_poll_timeout(struct timer_list *t)
{
- struct controller *ctrl = (struct controller *)data;
+ struct controller *ctrl = from_timer(ctrl, t, poll_timer);
/* Poll for interrupt events. regs == NULL => polling */
pcie_isr(0, ctrl);
- init_timer(&ctrl->poll_timer);
if (!pciehp_poll_time)
pciehp_poll_time = 2; /* default polling interval is 2 sec */
@@ -71,8 +70,6 @@ static void start_int_poll_timer(struct controller *ctrl, int sec)
if ((sec <= 0) || (sec > 60))
sec = 2;
- ctrl->poll_timer.function = &int_poll_timeout;
- ctrl->poll_timer.data = (unsigned long)ctrl;
ctrl->poll_timer.expires = jiffies + sec * HZ;
add_timer(&ctrl->poll_timer);
}
@@ -83,7 +80,7 @@ static inline int pciehp_request_irq(struct controller *ctrl)
/* Install interrupt polling timer. Start with 10 sec delay */
if (pciehp_poll_mode) {
- init_timer(&ctrl->poll_timer);
+ timer_setup(&ctrl->poll_timer, int_poll_timeout, 0);
start_int_poll_timer(ctrl, 10);
return 0;
}
@@ -764,8 +761,7 @@ int pciehp_reset_slot(struct slot *slot, int probe)
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
if (pciehp_poll_mode)
- int_poll_timeout(ctrl->poll_timer.data);
-
+ int_poll_timeout(&ctrl->poll_timer);
return 0;
}
@@ -795,7 +791,7 @@ static int pcie_init_slot(struct controller *ctrl)
if (!slot)
return -ENOMEM;
- slot->wq = alloc_workqueue("pciehp-%u", 0, 0, PSN(ctrl));
+ slot->wq = alloc_ordered_workqueue("pciehp-%u", 0, PSN(ctrl));
if (!slot->wq)
goto abort;
@@ -862,11 +858,16 @@ struct controller *pcie_init(struct pcie_device *dev)
if (link_cap & PCI_EXP_LNKCAP_DLLLARC)
ctrl->link_active_reporting = 1;
- /* Clear all remaining event bits in Slot Status register */
+ /*
+ * Clear all remaining event bits in Slot Status register except
+ * Presence Detect Changed. We want to make sure a possible
+ * hotplug event is triggered when the interrupt is unmasked so
+ * that we don't lose that event.
+ */
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
- PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
- PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
+ PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
+ PCI_EXP_SLTSTA_DLLSC);
ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c\n",
(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 19f30a9f461d..2a1ca020cf5a 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -46,7 +46,11 @@ int pciehp_configure_device(struct slot *p_slot)
dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
if (dev) {
- ctrl_err(ctrl, "Device %s already exists at %04x:%02x:00, cannot hot-add\n",
+ /*
+ * The device is already there. Either configured by the
+ * boot firmware or a previous hotplug event.
+ */
+ ctrl_dbg(ctrl, "Device %s already exists at %04x:%02x:00, skipping hot-add\n",
pci_name(dev), pci_domain_nr(parent), parent->number);
pci_dev_put(dev);
ret = -EEXIST;
@@ -60,9 +64,8 @@ int pciehp_configure_device(struct slot *p_slot)
goto out;
}
- list_for_each_entry(dev, &parent->devices, bus_list)
- if (pci_is_bridge(dev))
- pci_hp_add_bridge(dev);
+ for_each_pci_bridge(dev, parent)
+ pci_hp_add_bridge(dev);
pci_assign_unassigned_bridge_resources(bridge);
pcie_bus_configure_settings(parent);
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index e5824c7b7b6b..4810e9626d9f 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -229,14 +229,13 @@ static inline int shpc_indirect_read(struct controller *ctrl, int index,
/*
* This is the interrupt polling timeout function.
*/
-static void int_poll_timeout(unsigned long data)
+static void int_poll_timeout(struct timer_list *t)
{
- struct controller *ctrl = (struct controller *)data;
+ struct controller *ctrl = from_timer(ctrl, t, poll_timer);
/* Poll for interrupt events. regs == NULL => polling */
shpc_isr(0, ctrl);
- init_timer(&ctrl->poll_timer);
if (!shpchp_poll_time)
shpchp_poll_time = 2; /* default polling interval is 2 sec */
@@ -252,8 +251,6 @@ static void start_int_poll_timer(struct controller *ctrl, int sec)
if ((sec <= 0) || (sec > 60))
sec = 2;
- ctrl->poll_timer.function = &int_poll_timeout;
- ctrl->poll_timer.data = (unsigned long)ctrl;
ctrl->poll_timer.expires = jiffies + sec * HZ;
add_timer(&ctrl->poll_timer);
}
@@ -1054,7 +1051,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
if (shpchp_poll_mode) {
/* Install interrupt polling timer. Start with 10 sec delay */
- init_timer(&ctrl->poll_timer);
+ timer_setup(&ctrl->poll_timer, int_poll_timeout, 0);
start_int_poll_timer(ctrl, 10);
} else {
/* Installs the interrupt handler */
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index f8cd3a27e351..ea63db58b4b1 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -61,10 +61,8 @@ int shpchp_configure_device(struct slot *p_slot)
goto out;
}
- list_for_each_entry(dev, &parent->devices, bus_list) {
- if (PCI_SLOT(dev->devfn) != p_slot->device)
- continue;
- if (pci_is_bridge(dev))
+ for_each_pci_bridge(dev, parent) {
+ if (PCI_SLOT(dev->devfn) == p_slot->device)
pci_hp_add_bridge(dev);
}
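The acpiphp, cpci, pciehp and shpchp hunks above replace an open-coded list_for_each_entry() plus pci_is_bridge() check with the for_each_pci_bridge() iterator. A minimal sketch of the resulting pattern, with a hypothetical slot-number filter:

    #include <linux/pci.h>

    static void example_add_slot_bridges(struct pci_bus *parent, u8 slot_device)
    {
            struct pci_dev *dev;

            /* Visits only the bridges on the bus */
            for_each_pci_bridge(dev, parent) {
                    if (PCI_SLOT(dev->devfn) == slot_device)
                            pci_hp_add_bridge(dev);
            }
    }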
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ac41c8be9200..6bacb8995e96 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -113,7 +113,7 @@ resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
}
-int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
+int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
int i;
int rc = -ENOMEM;
@@ -134,7 +134,7 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
virtfn->vendor = dev->vendor;
- pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
+ virtfn->device = iov->vf_device;
rc = pci_setup_device(virtfn);
if (rc)
goto failed0;
@@ -157,12 +157,8 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
BUG_ON(rc);
}
- if (reset)
- __pci_reset_function(virtfn);
-
pci_device_add(virtfn, virtfn->bus);
- pci_bus_add_device(virtfn);
sprintf(buf, "virtfn%u", id);
rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
if (rc)
@@ -173,6 +169,8 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
+ pci_bus_add_device(virtfn);
+
return 0;
failed2:
@@ -187,7 +185,7 @@ failed:
return rc;
}
-void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset)
+void pci_iov_remove_virtfn(struct pci_dev *dev, int id)
{
char buf[VIRTFN_ID_LEN];
struct pci_dev *virtfn;
@@ -198,11 +196,6 @@ void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset)
if (!virtfn)
return;
- if (reset) {
- device_release_driver(&virtfn->dev);
- __pci_reset_function(virtfn);
- }
-
sprintf(buf, "virtfn%u", id);
sysfs_remove_link(&dev->dev.kobj, buf);
/*
@@ -317,7 +310,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
pci_cfg_access_unlock(dev);
for (i = 0; i < initial; i++) {
- rc = pci_iov_add_virtfn(dev, i, 0);
+ rc = pci_iov_add_virtfn(dev, i);
if (rc)
goto failed;
}
@@ -329,7 +322,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
failed:
while (i--)
- pci_iov_remove_virtfn(dev, i, 0);
+ pci_iov_remove_virtfn(dev, i);
err_pcibios:
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
@@ -356,7 +349,7 @@ static void sriov_disable(struct pci_dev *dev)
return;
for (i = 0; i < iov->num_VFs; i++)
- pci_iov_remove_virtfn(dev, i, 0);
+ pci_iov_remove_virtfn(dev, i);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
@@ -449,6 +442,7 @@ found:
iov->nres = nres;
iov->ctrl = ctrl;
iov->total_VFs = total;
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
iov->pgsz = pgsz;
iov->self = dev;
iov->drivers_autoprobe = true;
@@ -504,6 +498,14 @@ static void sriov_restore_state(struct pci_dev *dev)
if (ctrl & PCI_SRIOV_CTRL_VFE)
return;
+ /*
+ * Restore PCI_SRIOV_CTRL_ARI before pci_iov_set_numvfs() because
+ * it reads offset & stride, which depend on PCI_SRIOV_CTRL_ARI.
+ */
+ ctrl &= ~PCI_SRIOV_CTRL_ARI;
+ ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI;
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl);
+
for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
pci_update_resource(dev, i);
@@ -724,7 +726,7 @@ int pci_vfs_assigned(struct pci_dev *dev)
* determine the device ID for the VFs, the vendor ID will be the
* same as the PF so there is no need to check for that one
*/
- pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id);
+ dev_id = dev->sriov->vf_device;
/* loop through all the VFs to see if we own any that are assigned */
vfdev = pci_get_device(dev->vendor, dev_id, NULL);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index a8da543b3814..4708eb9df71b 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -624,7 +624,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
union acpi_object *obj;
struct pci_host_bridge *bridge;
- if (acpi_pci_disabled || !bus->bridge)
+ if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
return;
acpi_pci_slot_enumerate(bus);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 00fa4278c1f4..06c7f0b85cd2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -649,6 +649,33 @@ exit:
return count;
}
+static ssize_t sriov_offset_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n", pdev->sriov->offset);
+}
+
+static ssize_t sriov_stride_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n", pdev->sriov->stride);
+}
+
+static ssize_t sriov_vf_device_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%x\n", pdev->sriov->vf_device);
+}
+
static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -677,6 +704,9 @@ static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
static struct device_attribute sriov_numvfs_attr =
__ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
sriov_numvfs_show, sriov_numvfs_store);
+static struct device_attribute sriov_offset_attr = __ATTR_RO(sriov_offset);
+static struct device_attribute sriov_stride_attr = __ATTR_RO(sriov_stride);
+static struct device_attribute sriov_vf_device_attr = __ATTR_RO(sriov_vf_device);
static struct device_attribute sriov_drivers_autoprobe_attr =
__ATTR(sriov_drivers_autoprobe, (S_IRUGO|S_IWUSR|S_IWGRP),
sriov_drivers_autoprobe_show, sriov_drivers_autoprobe_store);
@@ -1749,6 +1779,9 @@ static const struct attribute_group pci_dev_hp_attr_group = {
static struct attribute *sriov_dev_attrs[] = {
&sriov_totalvfs_attr.attr,
&sriov_numvfs_attr.attr,
+ &sriov_offset_attr.attr,
+ &sriov_stride_attr.attr,
+ &sriov_vf_device_attr.attr,
&sriov_drivers_autoprobe_attr.attr,
NULL,
};
@@ -1796,6 +1829,6 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
NULL,
};
-struct device_type pci_dev_type = {
+const struct device_type pci_dev_type = {
.groups = pci_dev_attr_groups,
};
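The new sriov_offset, sriov_stride and sriov_vf_device attributes are plain read-only files under the PF's sysfs directory. A minimal userspace sketch; the bus/device/function in the path is a placeholder:

    #include <stdio.h>

    int main(void)
    {
            unsigned int vf_device;
            FILE *f = fopen("/sys/bus/pci/devices/0000:03:00.0/sriov_vf_device", "r");

            if (!f)
                    return 1;
            /* sriov_vf_device is printed as bare hex by the kernel */
            if (fscanf(f, "%x", &vf_device) == 1)
                    printf("VF device ID: 0x%04x\n", vf_device);
            fclose(f);
            return 0;
    }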
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 374f5686e2bc..4a7c6864fdf4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2965,6 +2965,107 @@ bool pci_acs_path_enabled(struct pci_dev *start,
}
/**
+ * pci_rebar_find_pos - find position of resize ctrl reg for BAR
+ * @pdev: PCI device
+ * @bar: BAR to find
+ *
+ * Helper to find the position of the ctrl register for a BAR.
+ * Returns -ENOTSUPP if resizable BARs are not supported at all.
+ * Returns -ENOENT if no ctrl register for the BAR could be found.
+ */
+static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
+{
+ unsigned int pos, nbars, i;
+ u32 ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+ if (!pos)
+ return -ENOTSUPP;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ for (i = 0; i < nbars; i++, pos += 8) {
+ int bar_idx;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
+ if (bar_idx == bar)
+ return pos;
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * pci_rebar_get_possible_sizes - get possible sizes for BAR
+ * @pdev: PCI device
+ * @bar: BAR to query
+ *
+ * Get the possible sizes of a resizable BAR as bitmask defined in the spec
+ * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
+ */
+u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
+{
+ int pos;
+ u32 cap;
+
+ pos = pci_rebar_find_pos(pdev, bar);
+ if (pos < 0)
+ return 0;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
+ return (cap & PCI_REBAR_CAP_SIZES) >> 4;
+}
+
+/**
+ * pci_rebar_get_current_size - get the current size of a BAR
+ * @pdev: PCI device
+ * @bar: BAR to query
+ *
+ * Read the size of a BAR from the resizable BAR config.
+ * Returns size if found or negative error code.
+ */
+int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
+{
+ int pos;
+ u32 ctrl;
+
+ pos = pci_rebar_find_pos(pdev, bar);
+ if (pos < 0)
+ return pos;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> 8;
+}
+
+/**
+ * pci_rebar_set_size - set a new size for a BAR
+ * @pdev: PCI device
+ * @bar: BAR to set size to
+ * @size: new size as defined in the spec (0=1MB, 19=512GB)
+ *
+ * Set the new size of a BAR as defined in the spec.
+ * Returns zero if resizing was successful, error code otherwise.
+ */
+int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
+{
+ int pos;
+ u32 ctrl;
+
+ pos = pci_rebar_find_pos(pdev, bar);
+ if (pos < 0)
+ return pos;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
+ ctrl |= size << 8;
+ pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
+ return 0;
+}
+
+/**
* pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
* @dev: the PCI device
* @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
@@ -3470,7 +3571,7 @@ EXPORT_SYMBOL(devm_pci_remap_cfgspace);
* All operations are managed and will be undone on driver detach.
*
* Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
- * on failure. Usage example:
+ * on failure. Usage example::
*
* res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
* base = devm_pci_remap_cfg_resource(&pdev->dev, res);
@@ -4145,35 +4246,6 @@ static void pci_dev_restore(struct pci_dev *dev)
}
/**
- * __pci_reset_function - reset a PCI device function
- * @dev: PCI device to reset
- *
- * Some devices allow an individual function to be reset without affecting
- * other functions in the same device. The PCI device must be responsive
- * to PCI config space in order to use this function.
- *
- * The device function is presumed to be unused when this function is called.
- * Resetting the device will make the contents of PCI configuration space
- * random, so any caller of this must be prepared to reinitialise the
- * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
- * etc.
- *
- * Returns 0 if the device function was successfully reset or negative if the
- * device doesn't support resetting a single function.
- */
-int __pci_reset_function(struct pci_dev *dev)
-{
- int ret;
-
- pci_dev_lock(dev);
- ret = __pci_reset_function_locked(dev);
- pci_dev_unlock(dev);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(__pci_reset_function);
-
-/**
* __pci_reset_function_locked - reset a PCI device function while holding
* the @dev mutex lock.
* @dev: PCI device to reset
@@ -4198,6 +4270,14 @@ int __pci_reset_function_locked(struct pci_dev *dev)
might_sleep();
+ /*
+ * A reset method returns -ENOTTY if it doesn't support this device
+ * and we should try the next method.
+ *
+ * If it returns 0 (success), we're finished. If it returns any
+ * other error, we're also finished: this indicates that further
+ * reset mechanisms might be broken on the device.
+ */
rc = pci_dev_specific_reset(dev, 0);
if (rc != -ENOTTY)
return rc;
@@ -4263,8 +4343,8 @@ int pci_probe_reset_function(struct pci_dev *dev)
*
* This function does not just reset the PCI portion of a device, but
* clears all the state associated with the device. This function differs
- * from __pci_reset_function in that it saves and restores device state
- * over the reset.
+ * from __pci_reset_function_locked() in that it saves and restores device state
+ * over the reset and takes the PCI device lock.
*
* Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
@@ -4299,7 +4379,7 @@ EXPORT_SYMBOL_GPL(pci_reset_function);
*
* This function does not just reset the PCI portion of a device, but
* clears all the state associated with the device. This function differs
- * from __pci_reset_function() in that it saves and restores device state
+ * from __pci_reset_function_locked() in that it saves and restores device state
* over the reset. It also differs from pci_reset_function() in that it
* requires the PCI device lock to be held.
*
@@ -4355,6 +4435,10 @@ static bool pci_bus_resetable(struct pci_bus *bus)
{
struct pci_dev *dev;
+
+ if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
+ return false;
+
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
(dev->subordinate && !pci_bus_resetable(dev->subordinate)))
@@ -4419,6 +4503,10 @@ static bool pci_slot_resetable(struct pci_slot *slot)
{
struct pci_dev *dev;
+ if (slot->bus->self &&
+ (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
+ return false;
+
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index fdb02c1f94bb..f6b58b32a67c 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -193,7 +193,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
}
extern const struct attribute_group *pci_dev_groups[];
extern const struct attribute_group *pcibus_groups[];
-extern struct device_type pci_dev_type;
+extern const struct device_type pci_dev_type;
extern const struct attribute_group *pci_bus_groups[];
@@ -264,6 +264,7 @@ struct pci_sriov {
u16 num_VFs; /* number of VFs available */
u16 offset; /* first VF Routing ID offset */
u16 stride; /* following VF stride */
+ u16 vf_device; /* VF device ID */
u32 pgsz; /* page size for BAR alignment */
u8 link; /* Function Dependency Link */
u8 max_VF_buses; /* max buses consumed by VFs */
@@ -367,4 +368,12 @@ int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
struct resource *res);
#endif
+u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
+int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
+int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size);
+static inline u64 pci_rebar_size_to_bytes(int size)
+{
+ return 1ULL << (size + 20);
+}
+
#endif /* DRIVERS_PCI_H */
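pci_rebar_get_possible_sizes() returns the supported sizes as the spec bitmask (bit n set means 2^(n+20) bytes), and pci_rebar_set_size() takes the same encoding. The helpers are internal to drivers/pci, so the sketch below only illustrates the intended calling pattern, assuming the caller has already released the BAR resource:

    #include <linux/pci.h>
    #include "pci.h"    /* drivers/pci internal header declaring the helpers */

    static int example_grow_bar(struct pci_dev *pdev, int bar)
    {
            u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);
            int size;

            if (!sizes)
                    return -ENOTSUPP;       /* BAR is not resizable */

            size = __fls(sizes);            /* largest supported encoding */
            dev_info(&pdev->dev, "resizing BAR %d to %llu bytes\n",
                     bar, pci_rebar_size_to_bytes(size));

            return pci_rebar_set_size(pdev, bar, size);
    }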
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 890efcc574cb..744805232155 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -390,7 +390,14 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
* If the error is reported by an end point, we think this
* error is related to the upstream link of the end point.
*/
- pci_walk_bus(dev->bus, cb, &result_data);
+ if (state == pci_channel_io_normal)
+ /*
+ * The error is non-fatal, so the bus is OK; just invoke
+ * the callback for the function that logged the error.
+ */
+ cb(dev, &result_data);
+ else
+ pci_walk_bus(dev->bus, cb, &result_data);
}
return result_data.result;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index af2c0023a1c2..9783e10da3a9 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -451,24 +451,25 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
return;
- /* Choose the greater of the two T_cmn_mode_rstr_time */
- val1 = (upreg->l1ss_cap >> 8) & 0xFF;
- val2 = (upreg->l1ss_cap >> 8) & 0xFF;
+ /* Choose the greater of the two Port Common_Mode_Restore_Times */
+ val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
+ val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
if (val1 > val2)
link->l1ss.ctl1 |= val1 << 8;
else
link->l1ss.ctl1 |= val2 << 8;
+
/*
* We currently use LTR L1.2 threshold to be fixed constant picked from
* Intel's coreboot.
*/
link->l1ss.ctl1 |= LTR_L1_2_THRESHOLD_BITS;
- /* Choose the greater of the two T_pwr_on */
- val1 = (upreg->l1ss_cap >> 19) & 0x1F;
- scale1 = (upreg->l1ss_cap >> 16) & 0x03;
- val2 = (dwreg->l1ss_cap >> 19) & 0x1F;
- scale2 = (dwreg->l1ss_cap >> 16) & 0x03;
+ /* Choose the greater of the two Port T_POWER_ON times */
+ val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
+ scale1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
+ val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
+ scale2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
if (calc_l1ss_pwron(link->pdev, scale1, val1) >
calc_l1ss_pwron(link->downstream, scale2, val2))
@@ -647,21 +648,26 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
if (enable_req & ASPM_STATE_L1_2_MASK) {
- /* Program T_pwr_on in both ports */
+ /* Program T_POWER_ON times in both ports */
pci_write_config_dword(parent, up_cap_ptr + PCI_L1SS_CTL2,
link->l1ss.ctl2);
pci_write_config_dword(child, dw_cap_ptr + PCI_L1SS_CTL2,
link->l1ss.ctl2);
- /* Program T_cmn_mode in parent */
+ /* Program Common_Mode_Restore_Time in upstream device */
pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
- 0xFF00, link->l1ss.ctl1);
-
- /* Program LTR L1.2 threshold in both ports */
- pci_clear_and_set_dword(parent, dw_cap_ptr + PCI_L1SS_CTL1,
- 0xE3FF0000, link->l1ss.ctl1);
+ PCI_L1SS_CTL1_CM_RESTORE_TIME,
+ link->l1ss.ctl1);
+
+ /* Program LTR_L1.2_THRESHOLD time in both ports */
+ pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
+ link->l1ss.ctl1);
pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
- 0xE3FF0000, link->l1ss.ctl1);
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
+ link->l1ss.ctl1);
}
val = 0;
@@ -803,10 +809,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
/*
* Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
- * hierarchies.
+ * hierarchies. Note that some PCIe host implementations omit
+ * the root ports entirely, in which case a downstream port on
+ * a switch may become the root of the link state chain for all
+ * its subordinate endpoints.
*/
if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
+ !pdev->bus->parent->self) {
link->root = link;
} else {
struct pcie_link_state *parent;
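
The aspm_calc_l1ss_info() hunk above feeds the extracted fields into calc_l1ss_pwron(), which converts a T_POWER_ON value/scale pair into microseconds (scales 0, 1, 2 selecting 2 us, 10 us and 100 us units). A small illustrative call with made-up capability fields:

	u32 t_power_on;

	/* Hypothetical fields: value 5, scale 2 (100 us units) */
	t_power_on = calc_l1ss_pwron(link->pdev, 2, 5);	/* 500 us */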
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index fafdb165dd2e..df290aa58dce 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -226,6 +226,9 @@ static void pcie_pme_work_fn(struct work_struct *work)
break;
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
+ if (rtsta == (u32) ~0)
+ break;
+
if (rtsta & PCI_EXP_RTSTA_PME) {
/*
* Clear PME status of the port. If there are other
@@ -273,7 +276,7 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
spin_lock_irqsave(&data->lock, flags);
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
- if (!(rtsta & PCI_EXP_RTSTA_PME)) {
+ if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) {
spin_unlock_irqrestore(&data->lock, flags);
return IRQ_NONE;
}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index ebc9d45bd731..a59210350c44 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -44,134 +44,113 @@ static void release_pcie_device(struct device *dev)
kfree(to_pcie_device(dev));
}
-/**
- * pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode
- * for given port
- * @dev: PCI Express port to handle
- * @irqs: Array of interrupt vectors to populate
- * @mask: Bitmask of port capabilities returned by get_port_device_capability()
- *
- * Return value: 0 on success, error code on failure
+/*
+ * Fill in *pme, *aer, *dpc with the relevant Interrupt Message Numbers if
+ * services are enabled in "mask". Return the number of MSI/MSI-X vectors
+ * required to accommodate the largest Message Number.
*/
-static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
+static int pcie_message_numbers(struct pci_dev *dev, int mask,
+ u32 *pme, u32 *aer, u32 *dpc)
{
- int nr_entries, entry, nvec = 0;
+ u32 nvec = 0, pos, reg32;
+ u16 reg16;
/*
- * Allocate as many entries as the port wants, so that we can check
- * which of them will be useful. Moreover, if nr_entries is correctly
- * equal to the number of entries this port actually uses, we'll happily
- * go through without any tricks.
+ * The Interrupt Message Number indicates which vector is used, i.e.,
+ * the MSI-X table entry or the MSI offset between the base Message
+ * Data and the generated interrupt message. See PCIe r3.1, sec
+ * 7.8.2, 7.10.10, 7.31.2.
*/
- nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
- PCI_IRQ_MSIX | PCI_IRQ_MSI);
- if (nr_entries < 0)
- return nr_entries;
if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
- u16 reg16;
-
- /*
- * Per PCIe r3.1, sec 6.1.6, "PME and Hot-Plug Event
- * interrupts (when both are implemented) always share the
- * same MSI or MSI-X vector, as indicated by the Interrupt
- * Message Number field in the PCI Express Capabilities
- * register".
- *
- * Per sec 7.8.2, "For MSI, the [Interrupt Message Number]
- * indicates the offset between the base Message Data and
- * the interrupt message that is generated."
- *
- * "For MSI-X, the [Interrupt Message Number] indicates
- * which MSI-X Table entry is used to generate the
- * interrupt message."
- */
pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
- entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
- if (entry >= nr_entries)
- goto out_free_irqs;
-
- irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, entry);
- irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, entry);
-
- nvec = max(nvec, entry + 1);
+ *pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
+ nvec = *pme + 1;
}
if (mask & PCIE_PORT_SERVICE_AER) {
- u32 reg32, pos;
-
- /*
- * Per PCIe r3.1, sec 7.10.10, the Advanced Error Interrupt
- * Message Number in the Root Error Status register
- * indicates which MSI/MSI-X vector is used for AER.
- *
- * "For MSI, the [Advanced Error Interrupt Message Number]
- * indicates the offset between the base Message Data and
- * the interrupt message that is generated."
- *
- * "For MSI-X, the [Advanced Error Interrupt Message
- * Number] indicates which MSI-X Table entry is used to
- * generate the interrupt message."
- */
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
- entry = reg32 >> 27;
- if (entry >= nr_entries)
- goto out_free_irqs;
-
- irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, entry);
-
- nvec = max(nvec, entry + 1);
+ if (pos) {
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS,
+ &reg32);
+ *aer = (reg32 & PCI_ERR_ROOT_AER_IRQ) >> 27;
+ nvec = max(nvec, *aer + 1);
+ }
}
if (mask & PCIE_PORT_SERVICE_DPC) {
- u16 reg16, pos;
-
- /*
- * Per PCIe r4.0 (v0.9), sec 7.9.15.2, the DPC Interrupt
- * Message Number in the DPC Capability register indicates
- * which MSI/MSI-X vector is used for DPC.
- *
- * "For MSI, the [DPC Interrupt Message Number] indicates
- * the offset between the base Message Data and the
- * interrupt message that is generated."
- *
- * "For MSI-X, the [DPC Interrupt Message Number] indicates
- * which MSI-X Table entry is used to generate the
- * interrupt message."
- */
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
- pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP, &reg16);
- entry = reg16 & 0x1f;
- if (entry >= nr_entries)
- goto out_free_irqs;
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP,
+ &reg16);
+ *dpc = reg16 & PCI_EXP_DPC_IRQ;
+ nvec = max(nvec, *dpc + 1);
+ }
+ }
+
+ return nvec;
+}
- irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, entry);
+/**
+ * pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode
+ * for given port
+ * @dev: PCI Express port to handle
+ * @irqs: Array of interrupt vectors to populate
+ * @mask: Bitmask of port capabilities returned by get_port_device_capability()
+ *
+ * Return value: 0 on success, error code on failure
+ */
+static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
+{
+ int nr_entries, nvec;
+ u32 pme = 0, aer = 0, dpc = 0;
- nvec = max(nvec, entry + 1);
+ /* Allocate the maximum possible number of MSI/MSI-X vectors */
+ nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI);
+ if (nr_entries < 0)
+ return nr_entries;
+
+ /* See how many and which Interrupt Message Numbers we actually use */
+ nvec = pcie_message_numbers(dev, mask, &pme, &aer, &dpc);
+ if (nvec > nr_entries) {
+ pci_free_irq_vectors(dev);
+ return -EIO;
}
/*
- * If nvec is equal to the allocated number of entries, we can just use
- * what we have. Otherwise, the port has some extra entries not for the
- * services we know and we need to work around that.
+ * If we allocated more than we need, free them and reallocate fewer.
+ *
+ * Reallocating may change the specific vectors we get, so
+ * pci_irq_vector() must be done *after* the reallocation.
+ *
+ * If we're using MSI, hardware is *allowed* to change the Interrupt
+ * Message Numbers when we free and reallocate the vectors, but we
+ * assume it won't because we allocate enough vectors for the
+ * biggest Message Number we found.
*/
if (nvec != nr_entries) {
- /* Drop the temporary MSI-X setup */
pci_free_irq_vectors(dev);
- /* Now allocate the MSI-X vectors for real */
nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_entries < 0)
return nr_entries;
}
- return 0;
+ /* PME and hotplug share an MSI/MSI-X vector */
+ if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
+ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, pme);
+ irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, pme);
+ }
-out_free_irqs:
- pci_free_irq_vectors(dev);
- return -EIO;
+ if (mask & PCIE_PORT_SERVICE_AER)
+ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, aer);
+
+ if (mask & PCIE_PORT_SERVICE_DPC)
+ irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, dpc);
+
+ return 0;
}
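
To make the new vector accounting concrete, consider a hypothetical port whose PME/hotplug Interrupt Message Number is 0 and whose AER message number is 4, with no DPC: pcie_message_numbers() returns nvec = 5, five vectors are allocated, and the services map to:

	irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, 0);
	irqs[PCIE_PORT_SERVICE_HP_SHIFT]  = pci_irq_vector(dev, 0);
	irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, 4);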
/**
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 68c389c7b975..ffbf4e723527 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -247,6 +247,7 @@ static struct pci_driver pcie_portdriver = {
.probe = pcie_portdrv_probe,
.remove = pcie_portdrv_remove,
+ .shutdown = pcie_portdrv_remove,
.err_handler = &pcie_portdrv_err_handler,
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ff94b69738a8..14e0ea1ff38b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -959,7 +959,21 @@ static void pci_enable_crs(struct pci_dev *pdev)
PCI_EXP_RTCTL_CRSSVE);
}
+static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
+ unsigned int available_buses);
+
/*
+ * pci_scan_bridge_extend() - Scan buses behind a bridge
+ * @bus: Parent bus the bridge is on
+ * @dev: Bridge itself
+ * @max: Starting subordinate number of buses behind this bridge
+ * @available_buses: Total number of buses available for this bridge and
+ * the devices below. After the minimal bus space has
+ * been allocated the remaining buses will be
+ * distributed equally between hotplug-capable bridges.
+ * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
+ * that need to be reconfigured).
+ *
* If it's a bridge, configure it and scan the bus behind it.
* For CardBus bridges, we don't scan behind as the devices will
* be handled by the bridge driver itself.
@@ -969,7 +983,9 @@ static void pci_enable_crs(struct pci_dev *pdev)
* them, we proceed to assigning numbers to the remaining buses in
* order to avoid overlaps between old and new bus numbers.
*/
-int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
+static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
+ int max, unsigned int available_buses,
+ int pass)
{
struct pci_bus *child;
int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
@@ -1076,9 +1092,13 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
child = pci_add_new_bus(bus, dev, max+1);
if (!child)
goto out;
- pci_bus_insert_busn_res(child, max+1, 0xff);
+ pci_bus_insert_busn_res(child, max+1,
+ bus->busn_res.end);
}
max++;
+ if (available_buses)
+ available_buses--;
+
buses = (buses & 0xff000000)
| ((unsigned int)(child->primary) << 0)
| ((unsigned int)(child->busn_res.start) << 8)
@@ -1100,7 +1120,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
if (!is_cardbus) {
child->bridge_ctl = bctl;
- max = pci_scan_child_bus(child);
+ max = pci_scan_child_bus_extend(child, available_buses);
} else {
/*
* For CardBus bridges, we leave 4 bus numbers
@@ -1168,6 +1188,28 @@ out:
return max;
}
+
+/*
+ * pci_scan_bridge() - Scan buses behind a bridge
+ * @bus: Parent bus the bridge is on
+ * @dev: Bridge itself
+ * @max: Starting subordinate number of buses behind this bridge
+ * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
+ * that need to be reconfigured).
+ *
+ * If it's a bridge, configure it and scan the bus behind it.
+ * For CardBus bridges, we don't scan behind as the devices will
+ * be handled by the bridge driver itself.
+ *
+ * We need to process bridges in two passes -- first we scan those
+ * already configured by the BIOS and after we are done with all of
+ * them, we proceed to assigning numbers to the remaining buses in
+ * order to avoid overlaps between old and new bus numbers.
+ */
+int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
+{
+ return pci_scan_bridge_extend(bus, dev, max, 0, pass);
+}
EXPORT_SYMBOL(pci_scan_bridge);
/*
@@ -2396,9 +2438,24 @@ void __weak pcibios_fixup_bus(struct pci_bus *bus)
/* nothing to do, expected to be removed in the future */
}
-unsigned int pci_scan_child_bus(struct pci_bus *bus)
+/**
+ * pci_scan_child_bus_extend() - Scan devices below a bus
+ * @bus: Bus to scan for devices
+ * @available_buses: Total number of buses available (%0 does not try to
+ * extend beyond the minimal)
+ *
+ * Scans devices below @bus including subordinate buses. Returns new
+ * subordinate number including all the found devices. Passing
+ * @available_buses causes the remaining bus space to be distributed
+ * equally between hotplug-capable bridges to allow future extension of the
+ * hierarchy.
+ */
+static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
+ unsigned int available_buses)
{
- unsigned int devfn, pass, max = bus->busn_res.start;
+ unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
+ unsigned int start = bus->busn_res.start;
+ unsigned int devfn, cmax, max = start;
struct pci_dev *dev;
dev_dbg(&bus->dev, "scanning bus\n");
@@ -2408,7 +2465,8 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
pci_scan_slot(bus, devfn);
/* Reserve buses for SR-IOV capability. */
- max += pci_iov_bus_range(bus);
+ used_buses = pci_iov_bus_range(bus);
+ max += used_buses;
/*
* After performing arch-dependent fixup of the bus, look behind
@@ -2420,19 +2478,73 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
bus->is_added = 1;
}
- for (pass = 0; pass < 2; pass++)
- list_for_each_entry(dev, &bus->devices, bus_list) {
- if (pci_is_bridge(dev))
- max = pci_scan_bridge(bus, dev, max, pass);
+ /*
+ * Calculate how many hotplug bridges and normal bridges there
+ * are on this bus. We will distribute the additional available
+ * buses between hotplug bridges.
+ */
+ for_each_pci_bridge(dev, bus) {
+ if (dev->is_hotplug_bridge)
+ hotplug_bridges++;
+ else
+ normal_bridges++;
+ }
+
+ /*
+ * Scan bridges that are already configured. We don't touch them
+ * unless they are misconfigured (that is handled in the second
+ * scan below).
+ */
+ for_each_pci_bridge(dev, bus) {
+ cmax = max;
+ max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
+ used_buses += cmax - max;
+ }
+
+ /* Scan bridges that need to be reconfigured */
+ for_each_pci_bridge(dev, bus) {
+ unsigned int buses = 0;
+
+ if (!hotplug_bridges && normal_bridges == 1) {
+ /*
+ * There is only one bridge on the bus (upstream
+ * port) so it gets all available buses which it
+ * can then distribute to the possible hotplug
+ * bridges below.
+ */
+ buses = available_buses;
+ } else if (dev->is_hotplug_bridge) {
+ /*
+ * Distribute the extra buses between hotplug
+ * bridges if any.
+ */
+ buses = available_buses / hotplug_bridges;
+ buses = min(buses, available_buses - used_buses);
}
+ cmax = max;
+ max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
+ used_buses += max - cmax;
+ }
+
/*
* Make sure a hotplug bridge has at least the minimum requested
- * number of buses.
+ * number of buses but allow it to grow up to the maximum available
+ * bus number if there is room.
*/
- if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
- if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
- max = bus->busn_res.start + pci_hotplug_bus_size - 1;
+ if (bus->self && bus->self->is_hotplug_bridge) {
+ used_buses = max_t(unsigned int, available_buses,
+ pci_hotplug_bus_size - 1);
+ if (max - start < used_buses) {
+ max = start + used_buses;
+
+ /* Do not allocate more buses than we have room left */
+ if (max > bus->busn_res.end)
+ max = bus->busn_res.end;
+
+ dev_dbg(&bus->dev, "%pR extended by %#02x\n",
+ &bus->busn_res, max - start);
+ }
}
/*
@@ -2445,6 +2557,18 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
return max;
}
+
+/**
+ * pci_scan_child_bus() - Scan devices below a bus
+ * @bus: Bus to scan for devices
+ *
+ * Scans devices below @bus including subordinate buses. Returns new
+ * subordinate number including all the found devices.
+ */
+unsigned int pci_scan_child_bus(struct pci_bus *bus)
+{
+ return pci_scan_child_bus_extend(bus, 0);
+}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
/**
@@ -2737,3 +2861,38 @@ void __init pci_sort_breadthfirst(void)
{
bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}
+
+int pci_hp_add_bridge(struct pci_dev *dev)
+{
+ struct pci_bus *parent = dev->bus;
+ int busnr, start = parent->busn_res.start;
+ unsigned int available_buses = 0;
+ int end = parent->busn_res.end;
+
+ for (busnr = start; busnr <= end; busnr++) {
+ if (!pci_find_bus(pci_domain_nr(parent), busnr))
+ break;
+ }
+ if (busnr-- > end) {
+ dev_err(&dev->dev, "No bus number available for hot-added bridge\n");
+ return -1;
+ }
+
+ /* Scan bridges that are already configured */
+ busnr = pci_scan_bridge(parent, dev, busnr, 0);
+
+ /*
+ * Distribute the available bus numbers between hotplug-capable
+ * bridges to make extending the chain later possible.
+ */
+ available_buses = end - busnr;
+
+ /* Scan bridges that need to be reconfigured */
+ pci_scan_bridge_extend(parent, dev, busnr, available_buses, 1);
+
+ if (!dev->subordinate)
+ return -1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_hp_add_bridge);
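
pci_hp_add_bridge() now lives in the core scan path so that hot-added bridges receive a share of the spare bus numbers. A rough sketch of a hotplug driver's enumeration sequence (modeled on the pciehp pattern; "parent" is the hotplug port's secondary bus, error handling omitted):

	struct pci_dev *dev;

	pci_scan_slot(parent, PCI_DEVFN(0, 0));

	for_each_pci_bridge(dev, parent)
		pci_hp_add_bridge(dev);	/* scan and reserve spare bus numbers */

	pci_assign_unassigned_bridge_resources(parent->self);
	pci_bus_add_devices(parent);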
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 911b3b65c8b2..10684b17d0bd 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3366,6 +3366,13 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
+/*
+ * Root ports on some Cavium CN8xxx chips do not successfully complete a bus
+ * reset when used with certain child devices. After the reset, config
+ * accesses to the child may fail.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
+
static void quirk_no_pm_reset(struct pci_dev *dev)
{
/*
@@ -4212,17 +4219,32 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
#endif
}
+static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
+{
+ /*
+ * Effectively selects all downstream ports of the whole ThunderX1
+ * family by the 0xf800 mask (which represents 8 SoCs), while the
+ * lower bits of the device ID indicate which subdevice is used
+ * within the SoC.
+ */
+ return (pci_is_pcie(dev) &&
+ (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) &&
+ ((dev->device & 0xf800) == 0xa000));
+}
+
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
/*
- * Cavium devices matching this quirk do not perform peer-to-peer
- * with other functions, allowing masking out these bits as if they
- * were unimplemented in the ACS capability.
+ * Cavium root ports don't advertise an ACS capability. However,
+ * the RTL internally implements similar protection as if ACS had
+ * Request Redirection, Completion Redirection, Source Validation,
+ * and Upstream Forwarding features enabled. Assert that the
+ * hardware implements and enables equivalent ACS functionality for
+ * these flags.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
- PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+ acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
- if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff)))
+ if (!pci_quirk_cavium_acs_match(dev))
return -ENOTTY;
return acs_flags ? 0 : 1;
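
For clarity, the 0xf800 mask in pci_quirk_cavium_acs_match() accepts device IDs 0xa000 through 0xa7ff; two hypothetical IDs show the effect:

	bool id_a02d = (0xa02d & 0xf800) == 0xa000;	/* true:  ThunderX downstream port */
	bool id_a800 = (0xa800 & 0xf800) == 0xa000;	/* false: outside the matched range */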
@@ -4800,3 +4822,11 @@ static void quirk_no_ats(struct pci_dev *pdev)
/* AMD Stoney platform GPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
#endif /* CONFIG_PCI_ATS */
+
+/* Freescale PCIe doesn't support MSI in RC mode */
+static void quirk_fsl_no_msi(struct pci_dev *pdev)
+{
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
+ pdev->no_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 73a03d382590..2fa0dbde36b7 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -19,9 +19,9 @@ static void pci_stop_dev(struct pci_dev *dev)
pci_pme_active(dev, false);
if (dev->is_added) {
+ device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
- device_release_driver(&dev->dev);
dev->is_added = 0;
}
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index b6edb187d160..1f5e6af96c83 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -147,12 +147,8 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
return NULL;
rom = ioremap(start, *size);
- if (!rom) {
- /* restore enable if ioremap fails */
- if (!(res->flags & IORESOURCE_ROM_ENABLE))
- pci_disable_rom(pdev);
- return NULL;
- }
+ if (!rom)
+ goto err_ioremap;
/*
* Try to find the true size of the ROM since sometimes the PCI window
@@ -160,7 +156,18 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
* True size is important if the ROM is going to be copied.
*/
*size = pci_get_rom_size(pdev, rom, *size);
+ if (!*size)
+ goto invalid_rom;
+
return rom;
+
+invalid_rom:
+ iounmap(rom);
+err_ioremap:
+ /* restore enable if ioremap() or ROM validation fails */
+ if (!(res->flags & IORESOURCE_ROM_ENABLE))
+ pci_disable_rom(pdev);
+ return NULL;
}
EXPORT_SYMBOL(pci_map_rom);
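
Since pci_map_rom() now returns NULL for an invalid ROM as well as for an ioremap() failure, callers are expected to check both the mapping and the reported size. A minimal sketch of the calling pattern (hypothetical caller, error value chosen for illustration):

	void __iomem *rom;
	size_t size;

	rom = pci_map_rom(pdev, &size);
	if (!rom)
		return -ENODEV;	/* no ROM, invalid ROM, or ioremap() failure */

	/* ... parse or copy at most "size" bytes ... */

	pci_unmap_rom(pdev, rom);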
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 958da7db9033..b1ad466199ad 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1518,13 +1518,16 @@ static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
break;
}
}
+
+#define PCI_RES_TYPE_MASK \
+ (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\
+ IORESOURCE_MEM_64)
+
static void pci_bridge_release_resources(struct pci_bus *bus,
unsigned long type)
{
struct pci_dev *dev = bus->self;
struct resource *r;
- unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
unsigned old_flags = 0;
struct resource *b_res;
int idx = 1;
@@ -1567,7 +1570,7 @@ static void pci_bridge_release_resources(struct pci_bus *bus,
*/
release_child_resources(r);
if (!release_resource(r)) {
- type = old_flags = r->flags & type_mask;
+ type = old_flags = r->flags & PCI_RES_TYPE_MASK;
dev_printk(KERN_DEBUG, &dev->dev, "resource %d %pR released\n",
PCI_BRIDGE_RESOURCES + idx, r);
/* keep the old size */
@@ -1758,8 +1761,6 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
enum release_type rel_type = leaf_only;
LIST_HEAD(fail_head);
struct pci_dev_resource *fail_res;
- unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
int pci_try_num = 1;
enum enable_type enable_local;
@@ -1818,7 +1819,7 @@ again:
*/
list_for_each_entry(fail_res, &fail_head, list)
pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & type_mask,
+ fail_res->flags & PCI_RES_TYPE_MASK,
rel_type);
/* restore size and flags */
@@ -1853,6 +1854,175 @@ void __init pci_assign_unassigned_resources(void)
}
}
+static void extend_bridge_window(struct pci_dev *bridge, struct resource *res,
+ struct list_head *add_list, resource_size_t available)
+{
+ struct pci_dev_resource *dev_res;
+
+ if (res->parent)
+ return;
+
+ if (resource_size(res) >= available)
+ return;
+
+ dev_res = res_to_dev_res(add_list, res);
+ if (!dev_res)
+ return;
+
+ /* Is there room to extend the window? */
+ if (available - resource_size(res) <= dev_res->add_size)
+ return;
+
+ dev_res->add_size = available - resource_size(res);
+ dev_dbg(&bridge->dev, "bridge window %pR extended by %pa\n", res,
+ &dev_res->add_size);
+}
+
+static void pci_bus_distribute_available_resources(struct pci_bus *bus,
+ struct list_head *add_list, resource_size_t available_io,
+ resource_size_t available_mmio, resource_size_t available_mmio_pref)
+{
+ resource_size_t remaining_io, remaining_mmio, remaining_mmio_pref;
+ unsigned int normal_bridges = 0, hotplug_bridges = 0;
+ struct resource *io_res, *mmio_res, *mmio_pref_res;
+ struct pci_dev *dev, *bridge = bus->self;
+
+ io_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
+ mmio_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
+ mmio_pref_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
+
+ /*
+ * Update additional resource list (add_list) to fill all the
+ * extra resource space available for this port except the space
+ * calculated in __pci_bus_size_bridges() which covers all the
+ * devices currently connected to the port and below.
+ */
+ extend_bridge_window(bridge, io_res, add_list, available_io);
+ extend_bridge_window(bridge, mmio_res, add_list, available_mmio);
+ extend_bridge_window(bridge, mmio_pref_res, add_list,
+ available_mmio_pref);
+
+ /*
+ * Calculate the total amount of extra resource space we can
+ * pass to bridges below this one. This is basically the
+ * extra space reduced by the minimal required space for the
+ * non-hotplug bridges.
+ */
+ remaining_io = available_io;
+ remaining_mmio = available_mmio;
+ remaining_mmio_pref = available_mmio_pref;
+
+ /*
+ * Calculate how many hotplug bridges and normal bridges there
+ * are on this bus. We will distribute the additional available
+ * resources between hotplug bridges.
+ */
+ for_each_pci_bridge(dev, bus) {
+ if (dev->is_hotplug_bridge)
+ hotplug_bridges++;
+ else
+ normal_bridges++;
+ }
+
+ for_each_pci_bridge(dev, bus) {
+ const struct resource *res;
+
+ if (dev->is_hotplug_bridge)
+ continue;
+
+ /*
+ * Reduce the available resource space by what the
+ * bridge and devices below it occupy.
+ */
+ res = &dev->resource[PCI_BRIDGE_RESOURCES + 0];
+ if (!res->parent && available_io > resource_size(res))
+ remaining_io -= resource_size(res);
+
+ res = &dev->resource[PCI_BRIDGE_RESOURCES + 1];
+ if (!res->parent && available_mmio > resource_size(res))
+ remaining_mmio -= resource_size(res);
+
+ res = &dev->resource[PCI_BRIDGE_RESOURCES + 2];
+ if (!res->parent && available_mmio_pref > resource_size(res))
+ remaining_mmio_pref -= resource_size(res);
+ }
+
+ /*
+ * Go over devices on this bus and distribute the remaining
+ * resource space between hotplug bridges.
+ */
+ for_each_pci_bridge(dev, bus) {
+ struct pci_bus *b;
+
+ b = dev->subordinate;
+ if (!b)
+ continue;
+
+ if (!hotplug_bridges && normal_bridges == 1) {
+ /*
+ * There is only one bridge on the bus (upstream
+ * port) so it gets all available resources
+ * which it can then distribute to the possible
+ * hotplug bridges below.
+ */
+ pci_bus_distribute_available_resources(b, add_list,
+ available_io, available_mmio,
+ available_mmio_pref);
+ } else if (dev->is_hotplug_bridge) {
+ resource_size_t align, io, mmio, mmio_pref;
+
+ /*
+ * Distribute available extra resources equally
+ * between hotplug-capable downstream ports
+ * taking alignment into account.
+ *
+ * Here hotplug_bridges is always != 0.
+ */
+ align = pci_resource_alignment(bridge, io_res);
+ io = div64_ul(available_io, hotplug_bridges);
+ io = min(ALIGN(io, align), remaining_io);
+ remaining_io -= io;
+
+ align = pci_resource_alignment(bridge, mmio_res);
+ mmio = div64_ul(available_mmio, hotplug_bridges);
+ mmio = min(ALIGN(mmio, align), remaining_mmio);
+ remaining_mmio -= mmio;
+
+ align = pci_resource_alignment(bridge, mmio_pref_res);
+ mmio_pref = div64_ul(available_mmio_pref,
+ hotplug_bridges);
+ mmio_pref = min(ALIGN(mmio_pref, align),
+ remaining_mmio_pref);
+ remaining_mmio_pref -= mmio_pref;
+
+ pci_bus_distribute_available_resources(b, add_list, io,
+ mmio, mmio_pref);
+ }
+ }
+}
+
+static void
+pci_bridge_distribute_available_resources(struct pci_dev *bridge,
+ struct list_head *add_list)
+{
+ resource_size_t available_io, available_mmio, available_mmio_pref;
+ const struct resource *res;
+
+ if (!bridge->is_hotplug_bridge)
+ return;
+
+ /* Take the initial extra resources from the hotplug port */
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
+ available_io = resource_size(res);
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
+ available_mmio = resource_size(res);
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
+ available_mmio_pref = resource_size(res);
+
+ pci_bus_distribute_available_resources(bridge->subordinate,
+ add_list, available_io, available_mmio, available_mmio_pref);
+}
+
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
struct pci_bus *parent = bridge->subordinate;
@@ -1862,11 +2032,17 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
LIST_HEAD(fail_head);
struct pci_dev_resource *fail_res;
int retval;
- unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
again:
__pci_bus_size_bridges(parent, &add_list);
+
+ /*
+ * Distribute remaining resources (if any) equally between
+ * hotplug bridges below. This makes it possible to extend the
+ * hierarchy later without running out of resources.
+ */
+ pci_bridge_distribute_available_resources(bridge, &add_list);
+
__pci_bridge_assign_resources(bridge, &add_list, &fail_head);
BUG_ON(!list_empty(&add_list));
tried_times++;
@@ -1889,7 +2065,7 @@ again:
*/
list_for_each_entry(fail_res, &fail_head, list)
pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & type_mask,
+ fail_res->flags & PCI_RES_TYPE_MASK,
whole_subtree);
/* restore size and flags */
@@ -1914,6 +2090,104 @@ enable_all:
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
+int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
+{
+ struct pci_dev_resource *dev_res;
+ struct pci_dev *next;
+ LIST_HEAD(saved);
+ LIST_HEAD(added);
+ LIST_HEAD(failed);
+ unsigned int i;
+ int ret;
+
+ /* Walk to the root hub, releasing bridge BARs when possible */
+ next = bridge;
+ do {
+ bridge = next;
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END;
+ i++) {
+ struct resource *res = &bridge->resource[i];
+
+ if ((res->flags ^ type) & PCI_RES_TYPE_MASK)
+ continue;
+
+ /* Ignore BARs which are still in use */
+ if (res->child)
+ continue;
+
+ ret = add_to_list(&saved, bridge, res, 0, 0);
+ if (ret)
+ goto cleanup;
+
+ dev_info(&bridge->dev, "BAR %d: releasing %pR\n",
+ i, res);
+
+ if (res->parent)
+ release_resource(res);
+ res->start = 0;
+ res->end = 0;
+ break;
+ }
+ if (i == PCI_BRIDGE_RESOURCE_END)
+ break;
+
+ next = bridge->bus ? bridge->bus->self : NULL;
+ } while (next);
+
+ if (list_empty(&saved))
+ return -ENOENT;
+
+ __pci_bus_size_bridges(bridge->subordinate, &added);
+ __pci_bridge_assign_resources(bridge, &added, &failed);
+ BUG_ON(!list_empty(&added));
+
+ if (!list_empty(&failed)) {
+ ret = -ENOSPC;
+ goto cleanup;
+ }
+
+ list_for_each_entry(dev_res, &saved, list) {
+ /* Skip the bridge we just assigned resources for. */
+ if (bridge == dev_res->dev)
+ continue;
+
+ bridge = dev_res->dev;
+ pci_setup_bridge(bridge->subordinate);
+ }
+
+ free_list(&saved);
+ return 0;
+
+cleanup:
+ /* restore size and flags */
+ list_for_each_entry(dev_res, &failed, list) {
+ struct resource *res = dev_res->res;
+
+ res->start = dev_res->start;
+ res->end = dev_res->end;
+ res->flags = dev_res->flags;
+ }
+ free_list(&failed);
+
+ /* Revert to the old configuration */
+ list_for_each_entry(dev_res, &saved, list) {
+ struct resource *res = dev_res->res;
+
+ bridge = dev_res->dev;
+ i = res - bridge->resource;
+
+ res->start = dev_res->start;
+ res->end = dev_res->end;
+ res->flags = dev_res->flags;
+
+ pci_claim_resource(bridge, i);
+ pci_setup_bridge(bridge->subordinate);
+ }
+ free_list(&saved);
+
+ return ret;
+}
+
void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -1921,10 +2195,9 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
want additional resources */
down_read(&pci_bus_sem);
- list_for_each_entry(dev, &bus->devices, bus_list)
- if (pci_is_bridge(dev) && pci_has_subordinate(dev))
- __pci_bus_size_bridges(dev->subordinate,
- &add_list);
+ for_each_pci_bridge(dev, bus)
+ if (pci_has_subordinate(dev))
+ __pci_bus_size_bridges(dev->subordinate, &add_list);
up_read(&pci_bus_sem);
__pci_bus_assign_resources(bus, &add_list, NULL);
BUG_ON(!list_empty(&add_list));
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index c039149cacb0..e815111f3f81 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -397,6 +397,64 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
return 0;
}
+void pci_release_resource(struct pci_dev *dev, int resno)
+{
+ struct resource *res = dev->resource + resno;
+
+ dev_info(&dev->dev, "BAR %d: releasing %pR\n", resno, res);
+ release_resource(res);
+ res->end = resource_size(res) - 1;
+ res->start = 0;
+ res->flags |= IORESOURCE_UNSET;
+}
+EXPORT_SYMBOL(pci_release_resource);
+
+int pci_resize_resource(struct pci_dev *dev, int resno, int size)
+{
+ struct resource *res = dev->resource + resno;
+ int old, ret;
+ u32 sizes;
+ u16 cmd;
+
+ /* Make sure the resource isn't assigned before resizing it. */
+ if (!(res->flags & IORESOURCE_UNSET))
+ return -EBUSY;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_MEMORY)
+ return -EBUSY;
+
+ sizes = pci_rebar_get_possible_sizes(dev, resno);
+ if (!sizes)
+ return -ENOTSUPP;
+
+ if (!(sizes & BIT(size)))
+ return -EINVAL;
+
+ old = pci_rebar_get_current_size(dev, resno);
+ if (old < 0)
+ return old;
+
+ ret = pci_rebar_set_size(dev, resno, size);
+ if (ret)
+ return ret;
+
+ res->end = res->start + pci_rebar_size_to_bytes(size) - 1;
+
+ /* Check if the new config works by trying to assign everything. */
+ ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
+ if (ret)
+ goto error_resize;
+
+ return 0;
+
+error_resize:
+ pci_rebar_set_size(dev, resno, old);
+ res->end = res->start + pci_rebar_size_to_bytes(old) - 1;
+ return ret;
+}
+EXPORT_SYMBOL(pci_resize_resource);
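
Together with pci_release_resource(), this is the interface a large-BAR device driver (a GPU driver, say) would use to grow its aperture. A rough sketch assuming BAR 0 is the resizable aperture and size code 15 (32 GB) is supported; names and values are hypothetical and error handling is trimmed:

	int ret;
	u16 cmd;

	/* Memory decoding must be off while the BAR is resized */
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(pdev, PCI_COMMAND, cmd & ~PCI_COMMAND_MEMORY);

	pci_release_resource(pdev, 0);		/* marks the BAR IORESOURCE_UNSET */
	ret = pci_resize_resource(pdev, 0, 15);	/* 15 -> 2^(15 + 20) = 32 GB */
	if (ret)
		dev_info(&pdev->dev, "BAR 0 resize failed: %d\n", ret);

	pci_assign_unassigned_bus_resources(pdev->bus);
	pci_write_config_word(pdev, PCI_COMMAND, cmd);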
+
int pci_enable_resources(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index af81b2dec42e..da45dbea20ce 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -943,7 +943,7 @@ static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
-const struct event_reg {
+static const struct event_reg {
size_t offset;
u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
size_t offset, int index);