Diffstat (limited to 'drivers/pci/pci.c')
 drivers/pci/pci.c | 295
 1 file changed, 283 insertions(+), 12 deletions(-)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6a9a1116f1eb..314db8c1047a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -27,6 +27,7 @@
#include <linux/pci_hotplug.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
+#include <linux/aer.h>
#include "pci.h"
const char *pci_power_names[] = {
@@ -458,6 +459,30 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev,
EXPORT_SYMBOL(pci_find_parent_resource);
/**
+ * pci_find_pcie_root_port - return PCIe Root Port
+ * @dev: PCI device to query
+ *
+ * Traverse up the parent chain and return the PCIe Root Port PCI Device
+ * for a given PCI Device.
+ */
+struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
+{
+ struct pci_dev *bridge, *highest_pcie_bridge = NULL;
+
+ bridge = pci_upstream_bridge(dev);
+ while (bridge && pci_is_pcie(bridge)) {
+ highest_pcie_bridge = bridge;
+ bridge = pci_upstream_bridge(bridge);
+ }
+
+	if (!highest_pcie_bridge ||
+	    pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
+		return NULL;
+
+ return highest_pcie_bridge;
+}
+EXPORT_SYMBOL(pci_find_pcie_root_port);
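Usage sketch (hypothetical caller, not part of this patch): the helper returns NULL when the device does not sit below a PCIe Root Port, so callers are expected to check the result before touching the port, e.g.:

	/* Hypothetical example: read a Root Port register on behalf of @dev. */
	static int example_read_root_port_devctl(struct pci_dev *dev, u16 *ctl)
	{
		struct pci_dev *rp = pci_find_pcie_root_port(dev);

		if (!rp)	/* no PCIe Root Port above this device */
			return -ENODEV;

		return pcie_capability_read_word(rp, PCI_EXP_DEVCTL, ctl);
	}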
+
+/**
* pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
* @dev: the PCI device to operate on
* @pos: config space offset of status word
@@ -484,7 +509,7 @@ int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
}
/**
- * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
+ * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
* @dev: PCI device to have its BARs restored
*
* Restore the BAR values for a given device, so as to make it
@@ -494,6 +519,10 @@ static void pci_restore_bars(struct pci_dev *dev)
{
int i;
+ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+ if (dev->is_virtfn)
+ return;
+
for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
pci_update_resource(dev, i);
}
@@ -1099,6 +1128,8 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_ats_state(dev);
pci_restore_vc_state(dev);
+ pci_cleanup_aer_error_status_regs(dev);
+
pci_restore_config_space(dev);
pci_restore_pcix_state(dev);
@@ -1710,15 +1741,7 @@ static void pci_pme_list_scan(struct work_struct *work)
mutex_unlock(&pci_pme_list_mutex);
}
-/**
- * pci_pme_active - enable or disable PCI device's PME# function
- * @dev: PCI device to handle.
- * @enable: 'true' to enable PME# generation; 'false' to disable it.
- *
- * The caller must verify that the device is capable of generating PME# before
- * calling this function with @enable equal to 'true'.
- */
-void pci_pme_active(struct pci_dev *dev, bool enable)
+static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
u16 pmcsr;
@@ -1732,6 +1755,19 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
+}
+
+/**
+ * pci_pme_active - enable or disable PCI device's PME# function
+ * @dev: PCI device to handle.
+ * @enable: 'true' to enable PME# generation; 'false' to disable it.
+ *
+ * The caller must verify that the device is capable of generating PME# before
+ * calling this function with @enable equal to 'true'.
+ */
+void pci_pme_active(struct pci_dev *dev, bool enable)
+{
+ __pci_pme_active(dev, enable);
/*
* PCI (as opposed to PCIe) PME requires that the device have
@@ -2032,17 +2068,60 @@ EXPORT_SYMBOL_GPL(pci_dev_run_wake);
* reconfigured due to wakeup settings difference between system and runtime
* suspend and the current power state of it is suitable for the upcoming
* (system) transition.
+ *
+ * If the device is not configured for system wakeup, disable PME for it before
+ * returning 'true' to prevent it from waking up the system unnecessarily.
*/
bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
{
struct device *dev = &pci_dev->dev;
if (!pm_runtime_suspended(dev)
- || (device_can_wakeup(dev) && !device_may_wakeup(dev))
+ || pci_target_state(pci_dev) != pci_dev->current_state
|| platform_pci_need_resume(pci_dev))
return false;
- return pci_target_state(pci_dev) == pci_dev->current_state;
+ /*
+ * At this point the device is good to go unless it's been configured
+ * to generate PME at the runtime suspend time, but it is not supposed
+ * to wake up the system. In that case, simply disable PME for it
+ * (it will have to be re-enabled on exit from system resume).
+ *
+ * If the device's power state is D3cold and the platform check above
+ * hasn't triggered, the device's configuration is suitable and we don't
+ * need to manipulate it at all.
+ */
+ spin_lock_irq(&dev->power.lock);
+
+ if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
+ !device_may_wakeup(dev))
+ __pci_pme_active(pci_dev, false);
+
+ spin_unlock_irq(&dev->power.lock);
+ return true;
+}
+
+/**
+ * pci_dev_complete_resume - Finalize resume from system sleep for a device.
+ * @pci_dev: Device to handle.
+ *
+ * If the device is runtime suspended and wakeup-capable, enable PME for it as
+ * it might have been disabled during the prepare phase of system suspend if
+ * the device was not configured for system wakeup.
+ */
+void pci_dev_complete_resume(struct pci_dev *pci_dev)
+{
+ struct device *dev = &pci_dev->dev;
+
+ if (!pci_dev_run_wake(pci_dev))
+ return;
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
+ __pci_pme_active(pci_dev, true);
+
+ spin_unlock_irq(&dev->power.lock);
}
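For context, a simplified sketch of the intended call sites (assumed here; the actual wiring lives in drivers/pci/pci-driver.c): the PM core consults pci_dev_keep_suspended() from its ->prepare() callback and calls pci_dev_complete_resume() from ->complete(), so a PME# disabled on the way into system sleep is re-armed on the way out:

	static int pci_pm_prepare(struct device *dev)
	{
		struct pci_dev *pci_dev = to_pci_dev(dev);

		/* A positive return lets the core leave the device runtime-suspended. */
		return pci_dev_keep_suspended(pci_dev);
	}

	static void pci_pm_complete(struct device *dev)
	{
		struct pci_dev *pci_dev = to_pci_dev(dev);

		pci_dev_complete_resume(pci_dev);	/* re-enable PME# if needed */
		pm_generic_complete(dev);
	}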
void pci_config_pm_runtime_get(struct pci_dev *pdev)
@@ -2148,6 +2227,198 @@ void pci_pm_init(struct pci_dev *dev)
}
}
+static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
+{
+ unsigned long flags = IORESOURCE_PCI_FIXED;
+
+ switch (prop) {
+ case PCI_EA_P_MEM:
+ case PCI_EA_P_VF_MEM:
+ flags |= IORESOURCE_MEM;
+ break;
+ case PCI_EA_P_MEM_PREFETCH:
+ case PCI_EA_P_VF_MEM_PREFETCH:
+ flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ break;
+ case PCI_EA_P_IO:
+ flags |= IORESOURCE_IO;
+ break;
+ default:
+ return 0;
+ }
+
+ return flags;
+}
+
+static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
+ u8 prop)
+{
+ if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
+ return &dev->resource[bei];
+#ifdef CONFIG_PCI_IOV
+ else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
+ (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
+ return &dev->resource[PCI_IOV_RESOURCES +
+ bei - PCI_EA_BEI_VF_BAR0];
+#endif
+ else if (bei == PCI_EA_BEI_ROM)
+ return &dev->resource[PCI_ROM_RESOURCE];
+ else
+ return NULL;
+}
+
+/* Read an Enhanced Allocation (EA) entry */
+static int pci_ea_read(struct pci_dev *dev, int offset)
+{
+ struct resource *res;
+ int ent_size, ent_offset = offset;
+ resource_size_t start, end;
+ unsigned long flags;
+ u32 dw0, bei, base, max_offset;
+ u8 prop;
+ bool support_64 = (sizeof(resource_size_t) >= 8);
+
+ pci_read_config_dword(dev, ent_offset, &dw0);
+ ent_offset += 4;
+
+ /* Entry size field indicates DWORDs after 1st */
+ ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
+
+ if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
+ goto out;
+
+ bei = (dw0 & PCI_EA_BEI) >> 4;
+ prop = (dw0 & PCI_EA_PP) >> 8;
+
+ /*
+ * If the Property is in the reserved range, try the Secondary
+ * Property instead.
+ */
+ if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
+ prop = (dw0 & PCI_EA_SP) >> 16;
+ if (prop > PCI_EA_P_BRIDGE_IO)
+ goto out;
+
+ res = pci_ea_get_resource(dev, bei, prop);
+ if (!res) {
+ dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei);
+ goto out;
+ }
+
+ flags = pci_ea_flags(dev, prop);
+ if (!flags) {
+ dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop);
+ goto out;
+ }
+
+ /* Read Base */
+ pci_read_config_dword(dev, ent_offset, &base);
+ start = (base & PCI_EA_FIELD_MASK);
+ ent_offset += 4;
+
+ /* Read MaxOffset */
+ pci_read_config_dword(dev, ent_offset, &max_offset);
+ ent_offset += 4;
+
+ /* Read Base MSBs (if 64-bit entry) */
+ if (base & PCI_EA_IS_64) {
+ u32 base_upper;
+
+ pci_read_config_dword(dev, ent_offset, &base_upper);
+ ent_offset += 4;
+
+ flags |= IORESOURCE_MEM_64;
+
+ /* entry starts above 32-bit boundary, can't use */
+ if (!support_64 && base_upper)
+ goto out;
+
+ if (support_64)
+ start |= ((u64)base_upper << 32);
+ }
+
+ end = start + (max_offset | 0x03);
+
+ /* Read MaxOffset MSBs (if 64-bit entry) */
+ if (max_offset & PCI_EA_IS_64) {
+ u32 max_offset_upper;
+
+ pci_read_config_dword(dev, ent_offset, &max_offset_upper);
+ ent_offset += 4;
+
+ flags |= IORESOURCE_MEM_64;
+
+ /* entry too big, can't use */
+ if (!support_64 && max_offset_upper)
+ goto out;
+
+ if (support_64)
+ end += ((u64)max_offset_upper << 32);
+ }
+
+ if (end < start) {
+ dev_err(&dev->dev, "EA Entry crosses address boundary\n");
+ goto out;
+ }
+
+ if (ent_size != ent_offset - offset) {
+ dev_err(&dev->dev,
+ "EA Entry Size (%d) does not match length read (%d)\n",
+ ent_size, ent_offset - offset);
+ goto out;
+ }
+
+ res->name = pci_name(dev);
+ res->start = start;
+ res->end = end;
+ res->flags = flags;
+
+ if (bei <= PCI_EA_BEI_BAR5)
+ dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+ bei, res, prop);
+ else if (bei == PCI_EA_BEI_ROM)
+ dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
+ res, prop);
+ else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
+ dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+ bei - PCI_EA_BEI_VF_BAR0, res, prop);
+ else
+ dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
+ bei, res, prop);
+
+out:
+ return offset + ent_size;
+}
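A worked example of the Base/MaxOffset arithmetic above (userspace sketch with made-up register values, assuming PCI_EA_FIELD_MASK clears the two low flag bits as the "| 0x03" in the end computation suggests): a 32-bit entry whose Base DWORD reads 0xe0000000 and whose MaxOffset DWORD reads 0x000ffffc describes the 1 MB fixed region 0xe0000000-0xe00fffff:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t base = 0xe0000000;		/* 64-bit flag (bit 1) clear */
		uint32_t max_offset = 0x000ffffc;	/* 64-bit flag clear */
		uint64_t start = base & ~0x3u;		/* mask off the flag bits */
		uint64_t end = start + (max_offset | 0x3);

		printf("%#llx-%#llx\n", (unsigned long long)start,
		       (unsigned long long)end);	/* 0xe0000000-0xe00fffff */
		return 0;
	}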
+
+/* Enhanced Allocation Initialization */
+void pci_ea_init(struct pci_dev *dev)
+{
+ int ea;
+ u8 num_ent;
+ int offset;
+ int i;
+
+ /* find PCI EA capability in list */
+ ea = pci_find_capability(dev, PCI_CAP_ID_EA);
+ if (!ea)
+ return;
+
+ /* determine the number of entries */
+ pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
+ &num_ent);
+ num_ent &= PCI_EA_NUM_ENT_MASK;
+
+ offset = ea + PCI_EA_FIRST_ENT;
+
+ /* Skip DWORD 2 for type 1 functions */
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ offset += 4;
+
+ /* parse each EA entry */
+ for (i = 0; i < num_ent; ++i)
+ offset = pci_ea_read(dev, offset);
+}
+
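A small worked example of the entry walk in pci_ea_init() (hypothetical capability offset and entry sizes, assuming the first entry begins one DWORD past the capability header): with the capability at config offset 0x50 and two enabled 32-bit entries (ES == 2, i.e. Base and MaxOffset follow the header DWORD), the entries are visited at 0x54 and 0x60; a type 1 (bridge) function would start one DWORD later:

	#include <stdio.h>

	int main(void)
	{
		int ea = 0x50;		/* pretend pci_find_capability() returned this */
		int offset = ea + 4;	/* first entry; a bridge skips 4 more bytes */
		int i;

		for (i = 0; i < 2; i++) {
			printf("entry %d at %#x\n", i, offset);
			offset += (2 + 1) << 2;	/* ES == 2: header + 2 DWORDs */
		}
		return 0;
	}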
static void pci_add_saved_cap(struct pci_dev *pci_dev,
struct pci_cap_saved_state *new_cap)
{