author    David S. Miller <davem@davemloft.net>    2015-02-06 01:33:28 +0300
committer David S. Miller <davem@davemloft.net>    2015-02-06 01:33:28 +0300
commit    6e03f896b52cd2ca88942170c5c9c407ec0ede69
tree      48ca9a6efa5f99819667538838bab3679416f92c /drivers
parent    db79a621835ee91d3e10177abd97f48e0a4dcf9b
parent    9d82f5eb3376cbae96ad36a063a9390de1694546
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/vxlan.c
	drivers/vhost/net.c
	include/linux/if_vlan.h
	net/core/dev.c

The net/core/dev.c conflict was the overlap of one commit marking an
existing function static whilst another was adding a new function.

In the include/linux/if_vlan.h case, the type used for a local
variable was changed in 'net', whereas the function got rewritten
to fix a stacked vlan bug in 'net-next'.

In drivers/vhost/net.c, Al Viro's iov_iter conversions in 'net-next'
overlapped with an endianness fix for VHOST 1.0 in 'net'.

In drivers/net/vxlan.c, vxlan_find_vni() added a 'flags' parameter
in 'net-next' whereas in 'net' there was a bug fix to pass in the
correct network namespace pointer in calls to this function.

Signed-off-by: David S. Miller <davem@davemloft.net>
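[Editor's note] For readers unfamiliar with how a merge commit like this is
produced: the resolution described above follows the standard git conflict
workflow. A minimal sketch, assuming the merge is run from a 'net-next'
checkout (the editor command is illustrative; the file list comes from the
Conflicts block above):

    # Pull 'net' into 'net-next'; git stops and reports the conflicting files.
    git merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

    # Hand-edit each conflicted file, keeping both sides' intent -- e.g. in
    # drivers/net/vxlan.c, the new 'flags' parameter from 'net-next' plus the
    # namespace-pointer fix from 'net'.
    git status
    $EDITOR drivers/net/vxlan.c drivers/vhost/net.c \
            include/linux/if_vlan.h net/core/dev.c

    # Mark the files resolved and record the merge with a message like the
    # one above.
    git add drivers/net/vxlan.c drivers/vhost/net.c \
            include/linux/if_vlan.h net/core/dev.c
    git commit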
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Kconfig | 2
-rw-r--r-- drivers/acpi/acpi_lpss.c | 35
-rw-r--r-- drivers/block/rbd.c | 25
-rw-r--r-- drivers/gpio/gpio-mcp23s08.c | 17
-rw-r--r-- drivers/gpio/gpio-omap.c | 39
-rw-r--r-- drivers/gpio/gpiolib-sysfs.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 78
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_module.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 17
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 18
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_drv.c | 3
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_drv.h | 3
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_main.c | 2
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_mode.c | 9
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 30
-rw-r--r-- drivers/gpu/drm/i2c/tda998x_drv.c | 52
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 26
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/cik_sdma.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/ni_dma.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.c | 24
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 12
-rw-r--r-- drivers/gpu/drm/radeon/radeon_benchmark.c | 13
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gart.c | 54
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kfd.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/radeon_test.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vm.c | 12
-rw-r--r-- drivers/gpu/drm/radeon/rs400.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/si_dma.c | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 28
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 25
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 18
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 36
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 25
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2
-rw-r--r-- drivers/i2c/busses/Kconfig | 1
-rw-r--r-- drivers/i2c/busses/i2c-s3c2410.c | 23
-rw-r--r-- drivers/i2c/busses/i2c-sh_mobile.c | 12
-rw-r--r-- drivers/i2c/i2c-core.c | 2
-rw-r--r-- drivers/i2c/i2c-slave-eeprom.c | 4
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 1
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib.h | 19
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 18
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 27
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 49
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 239
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 22
-rw-r--r-- drivers/input/mouse/elantech.c | 16
-rw-r--r-- drivers/input/mouse/synaptics.c | 7
-rw-r--r-- drivers/input/serio/i8042-x86ia64io.h | 8
-rw-r--r-- drivers/iommu/tegra-gart.c | 3
-rw-r--r-- drivers/isdn/hardware/eicon/message.c | 2
-rw-r--r-- drivers/md/bitmap.c | 13
-rw-r--r-- drivers/md/dm-cache-metadata.c | 9
-rw-r--r-- drivers/md/dm-thin.c | 6
-rw-r--r-- drivers/md/raid5.c | 5
-rw-r--r-- drivers/net/caif/caif_hsi.c | 1
-rw-r--r-- drivers/net/ethernet/amd/Kconfig | 4
-rw-r--r-- drivers/net/ethernet/amd/nmclan_cs.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2
-rw-r--r-- drivers/net/ethernet/cirrus/Kconfig | 3
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 19
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 27
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 26
-rw-r--r-- drivers/net/ethernet/sun/sunvnet.c | 1
-rw-r--r-- drivers/net/hyperv/netvsc.c | 11
-rw-r--r-- drivers/net/macvtap.c | 16
-rw-r--r-- drivers/net/ppp/ppp_deflate.c | 2
-rw-r--r-- drivers/net/tun.c | 25
-rw-r--r-- drivers/net/usb/sr9700.c | 36
-rw-r--r-- drivers/net/usb/sr9700.h | 66
-rw-r--r-- drivers/net/virtio_net.c | 24
-rw-r--r-- drivers/net/vxlan.c | 10
-rw-r--r-- drivers/net/wan/Kconfig | 6
-rw-r--r-- drivers/net/xen-netback/interface.c | 2
-rw-r--r-- drivers/net/xen-netback/netback.c | 3
-rw-r--r-- drivers/pci/host/pcie-designware.c | 3
-rw-r--r-- drivers/pci/quirks.c | 40
-rw-r--r-- drivers/pinctrl/pinctrl-at91.c | 108
-rw-r--r-- drivers/scsi/device_handler/scsi_dh.c | 3
-rw-r--r-- drivers/scsi/sd.c | 6
-rw-r--r-- drivers/spi/spi-fsl-dspi.c | 14
-rw-r--r-- drivers/spi/spi-imx.c | 4
-rw-r--r-- drivers/staging/lustre/lustre/llite/vvp_io.c | 2
-rw-r--r-- drivers/staging/nvec/nvec.c | 9
-rw-r--r-- drivers/usb/core/otg_whitelist.h | 5
-rw-r--r-- drivers/usb/core/quirks.c | 4
-rw-r--r-- drivers/usb/dwc2/core_intr.c | 6
-rw-r--r-- drivers/usb/phy/phy.c | 2
-rw-r--r-- drivers/usb/storage/unusual_devs.h | 9
-rw-r--r-- drivers/usb/storage/unusual_uas.h | 7
-rw-r--r-- drivers/vhost/net.c | 14
111 files changed, 1024 insertions(+), 773 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 694d5a70d6ce..c70d6e45dc10 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -134,8 +134,6 @@ source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
-source "drivers/soc/Kconfig"
-
source "drivers/clk/Kconfig"
source "drivers/hwspinlock/Kconfig"
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 4f3febf8a589..e75737fd7eef 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -1,7 +1,7 @@
/*
* ACPI support for Intel Lynxpoint LPSS.
*
- * Copyright (C) 2013, 2014, Intel Corporation
+ * Copyright (C) 2013, Intel Corporation
* Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
* Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
@@ -60,8 +60,6 @@ ACPI_MODULE_NAME("acpi_lpss");
#define LPSS_CLK_DIVIDER BIT(2)
#define LPSS_LTR BIT(3)
#define LPSS_SAVE_CTX BIT(4)
-#define LPSS_DEV_PROXY BIT(5)
-#define LPSS_PROXY_REQ BIT(6)
struct lpss_private_data;
@@ -72,10 +70,8 @@ struct lpss_device_desc {
void (*setup)(struct lpss_private_data *pdata);
};
-static struct device *proxy_device;
-
static struct lpss_device_desc lpss_dma_desc = {
- .flags = LPSS_CLK | LPSS_PROXY_REQ,
+ .flags = LPSS_CLK,
};
struct lpss_private_data {
@@ -150,24 +146,22 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
};
static struct lpss_device_desc byt_uart_dev_desc = {
- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
- LPSS_DEV_PROXY,
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
.prv_offset = 0x800,
.setup = lpss_uart_setup,
};
static struct lpss_device_desc byt_spi_dev_desc = {
- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
- LPSS_DEV_PROXY,
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
.prv_offset = 0x400,
};
static struct lpss_device_desc byt_sdio_dev_desc = {
- .flags = LPSS_CLK | LPSS_DEV_PROXY,
+ .flags = LPSS_CLK,
};
static struct lpss_device_desc byt_i2c_dev_desc = {
- .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_DEV_PROXY,
+ .flags = LPSS_CLK | LPSS_SAVE_CTX,
.prv_offset = 0x800,
.setup = byt_i2c_setup,
};
@@ -374,8 +368,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev);
if (!IS_ERR_OR_NULL(pdev)) {
- if (!proxy_device && dev_desc->flags & LPSS_DEV_PROXY)
- proxy_device = &pdev->dev;
return 1;
}
@@ -600,14 +592,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
acpi_lpss_save_ctx(dev, pdata);
- ret = acpi_dev_runtime_suspend(dev);
- if (ret)
- return ret;
-
- if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device)
- return pm_runtime_put_sync_suspend(proxy_device);
-
- return 0;
+ return acpi_dev_runtime_suspend(dev);
}
static int acpi_lpss_runtime_resume(struct device *dev)
@@ -615,12 +600,6 @@ static int acpi_lpss_runtime_resume(struct device *dev)
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
- if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) {
- ret = pm_runtime_get_sync(proxy_device);
- if (ret)
- return ret;
- }
-
ret = acpi_dev_runtime_resume(dev);
if (ret)
return ret;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3ec85dfce124..8a86b62466f7 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
* If an image has a non-zero parent overlap, get a reference to its
* parent.
*
- * We must get the reference before checking for the overlap to
- * coordinate properly with zeroing the parent overlap in
- * rbd_dev_v2_parent_info() when an image gets flattened. We
- * drop it again if there is no overlap.
- *
* Returns true if the rbd device has a parent with a non-zero
* overlap and a reference for it was successfully taken, or
* false otherwise.
*/
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
- int counter;
+ int counter = 0;
if (!rbd_dev->parent_spec)
return false;
- counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
- if (counter > 0 && rbd_dev->parent_overlap)
- return true;
-
- /* Image was flattened, but parent is not yet torn down */
+ down_read(&rbd_dev->header_rwsem);
+ if (rbd_dev->parent_overlap)
+ counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+ up_read(&rbd_dev->header_rwsem);
if (counter < 0)
rbd_warn(rbd_dev, "parent reference overflow");
- return false;
+ return counter > 0;
}
/*
@@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
- smp_mb();
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone image has been flattened\n",
rbd_dev->disk->disk_name);
@@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
* treat it specially.
*/
rbd_dev->parent_overlap = overlap;
- smp_mb();
if (!overlap) {
/* A null parent_spec indicates it's the initial probe */
@@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
struct rbd_image_header *header;
- /* Drop parent reference unless it's already been done (or none) */
-
- if (rbd_dev->parent_overlap)
- rbd_dev_parent_put(rbd_dev);
+ rbd_dev_parent_put(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index da9c316059bc..eea5d7e578c9 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -801,9 +801,11 @@ static int mcp230xx_probe(struct i2c_client *client,
client->irq = irq_of_parse_and_map(client->dev.of_node, 0);
} else {
pdata = dev_get_platdata(&client->dev);
- if (!pdata || !gpio_is_valid(pdata->base)) {
- dev_dbg(&client->dev, "invalid platform data\n");
- return -EINVAL;
+ if (!pdata) {
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct mcp23s08_platform_data),
+ GFP_KERNEL);
+ pdata->base = -1;
}
}
@@ -924,10 +926,11 @@ static int mcp23s08_probe(struct spi_device *spi)
} else {
type = spi_get_device_id(spi)->driver_data;
pdata = dev_get_platdata(&spi->dev);
- if (!pdata || !gpio_is_valid(pdata->base)) {
- dev_dbg(&spi->dev,
- "invalid or missing platform data\n");
- return -EINVAL;
+ if (!pdata) {
+ pdata = devm_kzalloc(&spi->dev,
+ sizeof(struct mcp23s08_platform_data),
+ GFP_KERNEL);
+ pdata->base = -1;
}
for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 30646cfe0efa..f476ae2eb0b3 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -88,6 +88,8 @@ struct gpio_bank {
#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
#define LINE_USED(line, offset) (line & (BIT(offset)))
+static void omap_gpio_unmask_irq(struct irq_data *d);
+
static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
{
return bank->chip.base + gpio_irq;
@@ -477,6 +479,16 @@ static int omap_gpio_is_input(struct gpio_bank *bank, int mask)
return readl_relaxed(reg) & mask;
}
+static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio,
+ unsigned offset)
+{
+ if (!LINE_USED(bank->mod_usage, offset)) {
+ omap_enable_gpio_module(bank, offset);
+ omap_set_gpio_direction(bank, offset, 1);
+ }
+ bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
+}
+
static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -506,15 +518,11 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
spin_lock_irqsave(&bank->lock, flags);
offset = GPIO_INDEX(bank, gpio);
retval = omap_set_gpio_triggering(bank, offset, type);
- if (!LINE_USED(bank->mod_usage, offset)) {
- omap_enable_gpio_module(bank, offset);
- omap_set_gpio_direction(bank, offset, 1);
- } else if (!omap_gpio_is_input(bank, BIT(offset))) {
+ omap_gpio_init_irq(bank, gpio, offset);
+ if (!omap_gpio_is_input(bank, BIT(offset))) {
spin_unlock_irqrestore(&bank->lock, flags);
return -EINVAL;
}
-
- bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
spin_unlock_irqrestore(&bank->lock, flags);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -792,6 +800,24 @@ exit:
pm_runtime_put(bank->dev);
}
+static unsigned int omap_gpio_irq_startup(struct irq_data *d)
+{
+ struct gpio_bank *bank = omap_irq_data_get_bank(d);
+ unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
+ unsigned long flags;
+ unsigned offset = GPIO_INDEX(bank, gpio);
+
+ if (!BANK_USED(bank))
+ pm_runtime_get_sync(bank->dev);
+
+ spin_lock_irqsave(&bank->lock, flags);
+ omap_gpio_init_irq(bank, gpio, offset);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ omap_gpio_unmask_irq(d);
+
+ return 0;
+}
+
static void omap_gpio_irq_shutdown(struct irq_data *d)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -1181,6 +1207,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
if (!irqc)
return -ENOMEM;
+ irqc->irq_startup = omap_gpio_irq_startup,
irqc->irq_shutdown = omap_gpio_irq_shutdown,
irqc->irq_ack = omap_gpio_ack_irq,
irqc->irq_mask = omap_gpio_mask_irq,
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index f62aa115d79a..7722ed53bd65 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -648,6 +648,7 @@ int gpiod_export_link(struct device *dev, const char *name,
if (tdev != NULL) {
status = sysfs_create_link(&dev->kobj, &tdev->kobj,
name);
+ put_device(tdev);
} else {
status = -ENODEV;
}
@@ -695,7 +696,7 @@ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
}
status = sysfs_set_active_low(desc, dev, value);
-
+ put_device(dev);
unlock:
mutex_unlock(&sysfs_lock);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 633532a2e7ec..25bc47f3c1cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
#define MQD_SIZE_ALIGNED 768
@@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->shared_resources = *gpu_resources;
/* calculate max size of mqds needed for queues */
- size = max_num_of_processes *
- max_num_of_queues_per_process *
- kfd->device_info->mqd_size_aligned;
+ size = max_num_of_queues_per_device *
+ kfd->device_info->mqd_size_aligned;
/* add another 512KB for all other allocations on gart */
size += 512 * 1024;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 30c8fda9622e..0fd592799d58 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ mutex_unlock(&dqm->lock);
+ return -EPERM;
+ }
+
if (list_empty(&qpd->queues_list)) {
retval = allocate_vmid(dqm, qpd, q);
if (retval != 0) {
@@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
list_add(&q->list, &qpd->queues_list);
dqm->queue_count++;
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
mutex_unlock(&dqm->lock);
return 0;
}
@@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
if (list_empty(&qpd->queues_list))
deallocate_vmid(dqm, qpd, q);
dqm->queue_count--;
+
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
out:
mutex_unlock(&dqm->lock);
return retval;
@@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm,
for (i = 0; i < pipes_num; i++) {
inx = i + first_pipe;
+ /*
+ * HPD buffer on GTT is allocated by amdkfd, no need to waste
+ * space in GTT for pipelines we don't initialize
+ */
pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
/* = log2(bytes/4)-1 */
- kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+ kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
}
@@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
pr_debug("kfd: In %s\n", __func__);
- retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
+ retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
if (retval != 0)
return retval;
@@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
pr_debug("kfd: In func %s\n", __func__);
mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ mutex_unlock(&dqm->lock);
+ return -EPERM;
+ }
+
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
list_add(&kq->list, &qpd->priv_queue_list);
dqm->queue_count++;
qpd->is_debug = true;
@@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->queue_count--;
qpd->is_debug = false;
execute_queues_cpsch(dqm, false);
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type.
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
mutex_unlock(&dqm->lock);
}
@@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ retval = -EPERM;
+ goto out;
+ }
+
mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
if (mqd == NULL) {
mutex_unlock(&dqm->lock);
@@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
retval = execute_queues_cpsch(dqm, false);
}
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
out:
mutex_unlock(&dqm->lock);
return retval;
@@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
mutex_unlock(&dqm->lock);
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index c3f189e8ae35..52035bf0c1cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -130,6 +130,7 @@ struct device_queue_manager {
struct list_head queues;
unsigned int processes_count;
unsigned int queue_count;
+ unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
unsigned int vmid_bitmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 95d5af138e6e..1c385c23dd0b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
"Kernel cmdline parameter that defines the amdkfd scheduling policy");
-int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
-module_param(max_num_of_processes, int, 0444);
-MODULE_PARM_DESC(max_num_of_processes,
- "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
-
-int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
-module_param(max_num_of_queues_per_process, int, 0444);
-MODULE_PARM_DESC(max_num_of_queues_per_process,
- "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+module_param(max_num_of_queues_per_device, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_device,
+ "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
bool kgd2kfd_init(unsigned interface_version,
const struct kfd2kgd_calls *f2g,
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
}
/* Verify module parameters */
- if ((max_num_of_processes < 0) ||
- (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
- pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
- return -1;
- }
-
- if ((max_num_of_queues_per_process < 0) ||
- (max_num_of_queues_per_process >
- KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
- pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+ if ((max_num_of_queues_per_device < 1) ||
+ (max_num_of_queues_per_device >
+ KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
+ pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
return -1;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 4c25ef504f79..6cfe7f1f18cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex);
int kfd_pasid_init(void)
{
- pasid_limit = max_num_of_processes;
+ pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
if (!pasid_bitmap)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index b3dc13c83169..96dc10e8904a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -52,20 +52,19 @@
#define kfd_alloc_struct(ptr_to_struct) \
((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
-/* Kernel module parameter to specify maximum number of supported processes */
-extern int max_num_of_processes;
-
-#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
#define KFD_MAX_NUM_OF_PROCESSES 512
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
/*
- * Kernel module parameter to specify maximum number of supported queues
- * per process
+ * Kernel module parameter to specify maximum number of supported queues per
+ * device
*/
-extern int max_num_of_queues_per_process;
+extern int max_num_of_queues_per_device;
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
+ (KFD_MAX_NUM_OF_PROCESSES * \
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
#define KFD_KERNEL_QUEUE_SIZE 2048
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 47526780d736..2fda1927bff7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("kfd: in %s\n", __func__);
found = find_first_zero_bit(pqm->queue_slot_bitmap,
- max_num_of_queues_per_process);
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
pr_debug("kfd: the new slot id %lu\n", found);
- if (found >= max_num_of_queues_per_process) {
+ if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
pqm->process->pasid);
return -ENOMEM;
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
INIT_LIST_HEAD(&pqm->queues);
pqm->queue_slot_bitmap =
- kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
+ kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
BITS_PER_BYTE), GFP_KERNEL);
if (pqm->queue_slot_bitmap == NULL)
return -ENOMEM;
@@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
pqn->kq = NULL;
retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
&q->properties.vmid);
+ pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
case KFD_QUEUE_TYPE_DIQ:
@@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("kfd: error dqm create queue\n");
+ pr_debug("Error dqm create queue\n");
goto err_create_queue;
}
@@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
err_create_queue:
kfree(pqn);
err_allocate_pqn:
+ /* check if queues list is empty unregister process from device */
clear_bit(*qid, pqm->queue_slot_bitmap);
+ if (list_empty(&pqm->queues))
+ dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
return retval;
}
@@ -311,7 +315,11 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
BUG_ON(!pqm);
pqn = get_queue_by_qid(pqm, qid);
- BUG_ON(!pqn);
+ if (!pqn) {
+ pr_debug("amdkfd: No queue %d exists for update operation\n",
+ qid);
+ return -EFAULT;
+ }
pqn->q->properties.queue_address = p->queue_address;
pqn->q->properties.queue_size = p->queue_size;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index c2a1cba1e984..b9140032962d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -16,9 +16,12 @@
#include "cirrus_drv.h"
int cirrus_modeset = -1;
+int cirrus_bpp = 24;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, cirrus_modeset, int, 0400);
+MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)");
+module_param_named(bpp, cirrus_bpp, int, 0400);
/*
* This is the generic driver code. This binds the driver to the drm core,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 693a4565c4ff..705061537a27 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -262,4 +262,7 @@ static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
int cirrus_bo_push_sysram(struct cirrus_bo *bo);
int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+
+extern int cirrus_bpp;
+
#endif /* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 4c2d68e9102d..e4b976658087 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -320,6 +320,8 @@ bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
const int max_size = cdev->mc.vram_size;
+ if (bpp > cirrus_bpp)
+ return false;
if (bpp > 32)
return false;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 99d4a74ffeaf..61385f2298bf 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -501,8 +501,13 @@ static int cirrus_vga_get_modes(struct drm_connector *connector)
int count;
/* Just add a static list of modes */
- count = drm_add_modes_noedid(connector, 1280, 1024);
- drm_set_preferred_mode(connector, 1024, 768);
+ if (cirrus_bpp <= 24) {
+ count = drm_add_modes_noedid(connector, 1280, 1024);
+ drm_set_preferred_mode(connector, 1024, 768);
+ } else {
+ count = drm_add_modes_noedid(connector, 800, 600);
+ drm_set_preferred_mode(connector, 800, 600);
+ }
return count;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index cf775a4449c1..dc386ebe5193 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
}
EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
+static void remove_from_modeset(struct drm_mode_set *set,
+ struct drm_connector *connector)
+{
+ int i, j;
+
+ for (i = 0; i < set->num_connectors; i++) {
+ if (set->connectors[i] == connector)
+ break;
+ }
+
+ if (i == set->num_connectors)
+ return;
+
+ for (j = i + 1; j < set->num_connectors; j++) {
+ set->connectors[j - 1] = set->connectors[j];
+ }
+ set->num_connectors--;
+
+ /* because i915 is pissy about this..
+ * TODO maybe need to makes sure we set it back to !=NULL somewhere?
+ */
+ if (set->num_connectors == 0)
+ set->fb = NULL;
+}
+
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
}
fb_helper->connector_count--;
kfree(fb_helper_connector);
+
+ /* also cleanup dangling references to the connector: */
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
+
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index d4762799351d..a9041d1a8ff0 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -32,6 +32,8 @@
struct tda998x_priv {
struct i2c_client *cec;
struct i2c_client *hdmi;
+ struct mutex mutex;
+ struct delayed_work dwork;
uint16_t rev;
uint8_t current_page;
int dpms;
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
uint8_t addr = REG2ADDR(reg);
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return ret;
+ goto out;
ret = i2c_master_send(client, &addr, sizeof(addr));
if (ret < 0)
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
if (ret < 0)
goto fail;
- return ret;
+ goto out;
fail:
dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
return ret;
}
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
buf[0] = REG2ADDR(reg);
memcpy(&buf[1], p, cnt);
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, cnt + 1);
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static int
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
uint8_t buf[] = {REG2ADDR(reg), val};
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static void
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static void
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
}
+/* handle HDMI connect/disconnect */
+static void tda998x_hpd(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tda998x_priv *priv =
+ container_of(dwork, struct tda998x_priv, dwork);
+
+ if (priv->encoder && priv->encoder->dev)
+ drm_kms_helper_hotplug_event(priv->encoder->dev);
+}
+
/*
* only 2 interrupts may occur: screen plug/unplug and EDID read
*/
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
priv->wq_edid_wait = 0;
wake_up(&priv->wq_edid);
} else if (cec != 0) { /* HPD change */
- if (priv->encoder && priv->encoder->dev)
- drm_helper_hpd_irq_event(priv->encoder->dev);
+ schedule_delayed_work(&priv->dwork, HZ/10);
}
return IRQ_HANDLED;
}
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
/* disable all IRQs and free the IRQ handler */
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
- if (priv->hdmi->irq)
+ if (priv->hdmi->irq) {
free_irq(priv->hdmi->irq, priv);
+ cancel_delayed_work_sync(&priv->dwork);
+ }
i2c_unregister_device(priv->cec);
}
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
struct device_node *np = client->dev.of_node;
u32 video;
int rev_lo, rev_hi, ret;
+ unsigned short cec_addr;
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
priv->current_page = 0xff;
priv->hdmi = client;
- priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+ cec_addr = 0x34 + (client->addr & 0x03);
+ priv->cec = i2c_new_dummy(client->adapter, cec_addr);
if (!priv->cec)
return -ENODEV;
priv->dpms = DRM_MODE_DPMS_OFF;
+ mutex_init(&priv->mutex); /* protect the page access */
+
/* wake up the device: */
cec_write(priv, REG_CEC_ENAMODS,
CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
if (client->irq) {
int irqf_trigger;
- /* init read EDID waitqueue */
+ /* init read EDID waitqueue and HDP work */
init_waitqueue_head(&priv->wq_edid);
+ INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
/* clear pending interrupts */
reg_read(priv, REG_INT_FLAGS_0);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 574057cd1d09..7643300828c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev));
- WARN_ON(IS_HSW_ULT(dev));
- } else if (IS_BROADWELL(dev)) {
- dev_priv->pch_type = PCH_LPT;
- dev_priv->pch_id =
- INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- DRM_DEBUG_KMS("This is Broadwell, assuming "
- "LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+ WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev));
- WARN_ON(!IS_HSW_ULT(dev));
+ WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+ WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e9f891c432f8..9d7a7155bf02 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2159,8 +2159,7 @@ struct drm_i915_cmd_table {
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
- ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
- (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+ ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 76354d3ba925..5f614828d365 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3148,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val;
+ /* Adjust fence size to match tiled area */
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ uint32_t row_size = obj->stride *
+ (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+ size = (size / row_size) * row_size;
+ }
+
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
@@ -4884,25 +4891,18 @@ i915_gem_init_hw(struct drm_device *dev)
for (i = 0; i < NUM_L3_SLICES(dev); i++)
i915_gem_l3_remap(&dev_priv->ring[RCS], i);
- /*
- * XXX: Contexts should only be initialized once. Doing a switch to the
- * default context switch however is something we'd like to do after
- * reset or thaw (the latter may not actually be necessary for HW, but
- * goes with our code better). Context switching requires rings (for
- * the do_switch), but before enabling PPGTT. So don't move this.
- */
- ret = i915_gem_context_enable(dev_priv);
+ ret = i915_ppgtt_init_hw(dev);
if (ret && ret != -EIO) {
- DRM_ERROR("Context enable failed %d\n", ret);
+ DRM_ERROR("PPGTT enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
-
- return ret;
}
- ret = i915_ppgtt_init_hw(dev);
+ ret = i915_gem_context_enable(dev_priv);
if (ret && ret != -EIO) {
- DRM_ERROR("PPGTT enable failed %d\n", ret);
+ DRM_ERROR("Context enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
+
+ return ret;
}
return ret;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4d63839bd9b4..dfb783a8f2c3 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
WARN_ON(panel->backlight.max == 0);
- if (panel->backlight.level == 0) {
+ if (panel->backlight.level <= panel->backlight.min) {
panel->backlight.level = panel->backlight.max;
if (panel->backlight.device)
panel->backlight.device->props.brightness =
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index a0133c74f4cf..42cd0cffe210 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 4be2bb7cbef3..ce787a9f12c0 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 74f06d540591..279801ca5110 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return r;
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
return radeon_gart_table_ram_alloc(rdev);
}
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
WREG32(RADEON_AIC_HI_ADDR, 0);
}
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+ return addr;
+}
+
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+ uint64_t entry)
{
u32 *gtt = rdev->gart.ptr;
- gtt[i] = cpu_to_le32(lower_32_bits(addr));
+ gtt[i] = cpu_to_le32(lower_32_bits(entry));
}
void r100_pci_gart_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 064ad5569cca..08d68f3e13e9 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
- void __iomem *ptr = rdev->gart.ptr;
-
addr = (lower_32_bits(addr) >> 8) |
((upper_32_bits(addr) & 0xff) << 24);
if (flags & RADEON_GART_PAGE_READ)
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R300_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
addr |= R300_PTE_UNSNOOPED;
+ return addr;
+}
+
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ void __iomem *ptr = rdev->gart.ptr;
+
/* on x86 we want this to be CPU endian, on powerpc
* on powerpc without HW swappers, it'll get swapped on way
* into VRAM - so no need for cpu_to_le32 on VRAM tables */
- writel(addr, ((void __iomem *)ptr) + (i * 4));
+ writel(entry, ((void __iomem *)ptr) + (i * 4));
}
int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
return radeon_gart_table_vram_alloc(rdev);
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 54529b837afa..3f2a8d3febca 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
* Dummy page
*/
struct radeon_dummy_page {
+ uint64_t entry;
struct page *page;
dma_addr_t addr;
};
@@ -645,7 +646,7 @@ struct radeon_gart {
unsigned num_cpu_pages;
unsigned table_size;
struct page **pages;
- dma_addr_t *pages_addr;
+ uint64_t *pages_entry;
bool ready;
};
@@ -1847,8 +1848,9 @@ struct radeon_asic {
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
+ uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
void (*set_page)(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
@@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
+#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
+#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 121aff6a3b41..ed0e10eee2dc 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = {
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
+ .get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
@@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = {
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs600_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -691,6 +700,7 @@ static struct radeon_asic rs690_asic = {
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
+ .get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
@@ -759,6 +769,7 @@ static struct radeon_asic rv515_asic = {
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -825,6 +836,7 @@ static struct radeon_asic r520_asic = {
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -919,6 +931,7 @@ static struct radeon_asic r600_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1004,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1095,6 +1109,7 @@ static struct radeon_asic rs780_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1199,6 +1214,7 @@ static struct radeon_asic rv770_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1317,6 +1333,7 @@ static struct radeon_asic evergreen_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1409,6 +1426,7 @@ static struct radeon_asic sumo_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1500,6 +1518,7 @@ static struct radeon_asic btc_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1635,6 +1654,7 @@ static struct radeon_asic cayman_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -1738,6 +1758,7 @@ static struct radeon_asic trinity_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -1871,6 +1892,7 @@ static struct radeon_asic si_asic = {
.get_gpu_clock_counter = &si_get_gpu_clock_counter,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -2032,6 +2054,7 @@ static struct radeon_asic ci_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -2139,6 +2162,7 @@ static struct radeon_asic kv_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 2a45d548d5ec..8d787d115653 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 9e7f23dd14bd..87d5fb21cb61 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -34,7 +34,8 @@
static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
uint64_t saddr, uint64_t daddr,
- int flag, int n)
+ int flag, int n,
+ struct reservation_object *resv)
{
unsigned long start_jiffies;
unsigned long end_jiffies;
@@ -47,12 +48,12 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
case RADEON_BENCHMARK_COPY_DMA:
fence = radeon_copy_dma(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
- NULL);
+ resv);
break;
case RADEON_BENCHMARK_COPY_BLIT:
fence = radeon_copy_blit(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
- NULL);
+ resv);
break;
default:
DRM_ERROR("Unknown copy method\n");
@@ -120,7 +121,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (rdev->asic->copy.dma) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
- RADEON_BENCHMARK_COPY_DMA, n);
+ RADEON_BENCHMARK_COPY_DMA, n,
+ dobj->tbo.resv);
if (time < 0)
goto out_cleanup;
if (time > 0)
@@ -130,7 +132,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (rdev->asic->copy.blit) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
- RADEON_BENCHMARK_COPY_BLIT, n);
+ RADEON_BENCHMARK_COPY_BLIT, n,
+ dobj->tbo.resv);
if (time < 0)
goto out_cleanup;
if (time > 0)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0ec65168f331..bd7519fdd3f4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
rdev->dummy_page.page = NULL;
return -ENOMEM;
}
+ rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
+ RADEON_GART_PAGE_DUMMY);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 102116902a07..913fafa597ad 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -960,6 +960,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
pll->flags & RADEON_PLL_USE_REF_DIV)
ref_div_max = pll->reference_div;
+ else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
+ /* fix for problems on RS880 */
+ ref_div_max = min(pll->max_ref_div, 7u);
else
ref_div_max = pll->max_ref_div;
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 84146d5901aa..5450fa95a47e 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
radeon_bo_unpin(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.robj);
rdev->gart.table_addr = gpu_addr;
+
+ if (!r) {
+ int i;
+
+ /* We might have dropped some GART table updates while it wasn't
+ * mapped; restore all entries.
+ */
+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+ radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
+ mb();
+ radeon_gart_tlb_flush(rdev);
+ }
+
return r;
}
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
unsigned t;
unsigned p;
int i, j;
- u64 page_base;
if (!rdev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
rdev->gart.pages[p] = NULL;
- rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
- page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
if (rdev->gart.ptr) {
- radeon_gart_set_page(rdev, t, page_base,
- RADEON_GART_PAGE_DUMMY);
+ radeon_gart_set_page(rdev, t,
+ rdev->dummy_page.entry);
}
- page_base += RADEON_GPU_PAGE_SIZE;
}
}
}
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
{
unsigned t;
unsigned p;
- uint64_t page_base;
+ uint64_t page_base, page_entry;
int i, j;
if (!rdev->gart.ready) {
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- rdev->gart.pages_addr[p] = dma_addr[i];
rdev->gart.pages[p] = pagelist[i];
- if (rdev->gart.ptr) {
- page_base = rdev->gart.pages_addr[p];
- for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base, flags);
- page_base += RADEON_GPU_PAGE_SIZE;
+ page_base = dma_addr[i];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ page_entry = radeon_gart_get_page_entry(page_base, flags);
+ rdev->gart.pages_entry[t] = page_entry;
+ if (rdev->gart.ptr) {
+ radeon_gart_set_page(rdev, t, page_entry);
}
+ page_base += RADEON_GPU_PAGE_SIZE;
}
}
mb();
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
- rdev->gart.num_cpu_pages);
- if (rdev->gart.pages_addr == NULL) {
+ rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
+ rdev->gart.num_gpu_pages);
+ if (rdev->gart.pages_entry == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
/* set GART entry to point to the dummy page by default */
- for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
- rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
- }
+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+ rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
return 0;
}
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev)
*/
void radeon_gart_fini(struct radeon_device *rdev)
{
- if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+ if (rdev->gart.ready) {
/* unbind pages */
radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
}
rdev->gart.ready = false;
vfree(rdev->gart.pages);
- vfree(rdev->gart.pages_addr);
+ vfree(rdev->gart.pages_entry);
rdev->gart.pages = NULL;
- rdev->gart.pages_addr = NULL;
+ rdev->gart.pages_entry = NULL;
radeon_dummy_page_fini(rdev);
}
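radeon_gart.c now keeps pages_entry[] as a shadow copy of the whole table, sized in GPU pages, so updates that arrive while the VRAM table is unmapped can be replayed when it is pinned again. A compact userspace model of that shadow-and-replay scheme, with a made-up table size and a fake dummy entry:

#include <stdint.h>

#define NUM_GPU_PAGES 8
#define DUMMY_ENTRY   0xdead0001ULL          /* stand-in for dummy_page.entry */

struct gart {
	uint64_t pages_entry[NUM_GPU_PAGES]; /* shadow copy, always valid   */
	uint64_t *ptr;                       /* NULL while table is unmapped */
};

/* Every update goes to the shadow first, and to the table if mapped. */
static void gart_update(struct gart *g, unsigned int i, uint64_t entry)
{
	g->pages_entry[i] = entry;
	if (g->ptr)
		g->ptr[i] = entry;
}

/* On (re)pin, rebuild the whole table from the shadow copy. */
static void gart_table_pinned(struct gart *g, uint64_t *table)
{
	g->ptr = table;
	for (unsigned int i = 0; i < NUM_GPU_PAGES; i++)
		g->ptr[i] = g->pages_entry[i];
	/* a real driver would issue a memory barrier and TLB flush here */
}

int main(void)
{
	struct gart g = { .ptr = NULL };
	uint64_t table[NUM_GPU_PAGES];

	for (unsigned int i = 0; i < NUM_GPU_PAGES; i++)
		g.pages_entry[i] = DUMMY_ENTRY;  /* default: dummy page */

	gart_update(&g, 3, 0x1000ULL);           /* arrives while unmapped */
	gart_table_pinned(&g, table);            /* replayed on pin */
	return table[3] == 0x1000ULL ? 0 : 1;
}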
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d0b4f7d1140d..ac3c1310b953 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -146,7 +146,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
struct radeon_bo_va *bo_va;
int r;
- if (rdev->family < CHIP_CAYMAN) {
+ if ((rdev->family < CHIP_CAYMAN) ||
+ (!rdev->accel_working)) {
return 0;
}
@@ -176,7 +177,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
struct radeon_bo_va *bo_va;
int r;
- if (rdev->family < CHIP_CAYMAN) {
+ if ((rdev->family < CHIP_CAYMAN) ||
+ (!rdev->accel_working)) {
return;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 8bf87f1203cc..bef9a0953284 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -436,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd)
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
- uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+ uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
lock_srbm(kgd, mec, pipe, 0, 0);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 3cf9c1fa6475..686411e4e4f6 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -605,14 +605,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}
- vm = &fpriv->vm;
- r = radeon_vm_init(rdev, vm);
- if (r) {
- kfree(fpriv);
- return r;
- }
-
if (rdev->accel_working) {
+ vm = &fpriv->vm;
+ r = radeon_vm_init(rdev, vm);
+ if (r) {
+ kfree(fpriv);
+ return r;
+ }
+
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r) {
radeon_vm_fini(rdev, vm);
@@ -668,9 +668,9 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
}
+ radeon_vm_fini(rdev, vm);
}
- radeon_vm_fini(rdev, vm);
kfree(fpriv);
file_priv->driver_priv = NULL;
}
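The radeon_kms.c hunks make the point that per-file VM setup and teardown must be guarded by the same accel_working predicate, otherwise close ends up finalizing a VM that open never initialized. A small sketch of the paired-guard pattern; accel_working, driver_open and driver_close are stand-ins, not the real KMS entry points:

#include <stdlib.h>

struct vm { void *pt; };

static int accel_working;                    /* set when the GPU is usable */

static int driver_open(struct vm *vm)
{
	if (!accel_working)
		return 0;                    /* no VM when accel is dead */
	vm->pt = malloc(64);
	return vm->pt ? 0 : -1;
}

static void driver_close(struct vm *vm)
{
	if (!accel_working)
		return;                      /* mirror the open() condition */
	free(vm->pt);
	vm->pt = NULL;
}

int main(void)
{
	struct vm vm = { 0 };

	accel_working = 0;                   /* e.g. GPU init failed */
	if (driver_open(&vm))
		return 1;
	driver_close(&vm);                   /* must not tear down an absent VM */
	return 0;
}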
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 07b506b41008..791818165c76 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -119,11 +119,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
if (ring == R600_RING_TYPE_DMA_INDEX)
fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
size / RADEON_GPU_PAGE_SIZE,
- NULL);
+ vram_obj->tbo.resv);
else
fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
size / RADEON_GPU_PAGE_SIZE,
- NULL);
+ vram_obj->tbo.resv);
if (IS_ERR(fence)) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
r = PTR_ERR(fence);
@@ -170,11 +170,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
if (ring == R600_RING_TYPE_DMA_INDEX)
fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
size / RADEON_GPU_PAGE_SIZE,
- NULL);
+ vram_obj->tbo.resv);
else
fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
size / RADEON_GPU_PAGE_SIZE,
- NULL);
+ vram_obj->tbo.resv);
if (IS_ERR(fence)) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
r = PTR_ERR(fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index cde48c42b30a..2a5a4a9e772d 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
uint64_t result;
/* page table offset */
- result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
- /* in case cpu page size != gpu page size*/
- result |= addr & (~PAGE_MASK);
+ result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
+ result &= ~RADEON_GPU_PAGE_MASK;
return result;
}
@@ -745,9 +743,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
*/
/* NI is optimized for 256KB fragments, SI and newer for 64KB */
- uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
+ uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
+ (rdev->family == CHIP_ARUBA)) ?
R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
- uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
+ uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
+ (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;
uint64_t frag_start = ALIGN(pe_start, frag_align);
uint64_t frag_end = pe_end & ~(frag_align - 1);
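With the cached entries in place, radeon_vm_map_gart() becomes an array lookup plus a mask that strips the flag bits from the low 12 bits, so it returns a page-aligned address; that is why the extra masking in si_dma.c further below can be dropped. A compact model with illustrative constants:

#include <stdint.h>

#define GPU_PAGE_SHIFT 12
#define GPU_PAGE_MASK  0xfffULL              /* low bits hold PTE flags */

static uint64_t pages_entry[16];             /* per-GPU-page cached entries */

static uint64_t vm_map_gart(uint64_t addr)
{
	uint64_t result = pages_entry[addr >> GPU_PAGE_SHIFT];

	/* strip the flag bits; callers get a page-aligned bus address */
	return result & ~GPU_PAGE_MASK;
}

int main(void)
{
	pages_entry[2] = 0xabcde000ULL | 0x3; /* address plus two flag bits */
	return vm_map_gart(2 << GPU_PAGE_SHIFT) == 0xabcde000ULL ? 0 : 1;
}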
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c5799f16aa4b..34e3235f41d2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
uint32_t entry;
- u32 *gtt = rdev->gart.ptr;
entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4);
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
entry |= RS400_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
entry |= RS400_PTE_UNSNOOPED;
- entry = cpu_to_le32(entry);
- gtt[i] = entry;
+ return entry;
+}
+
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ u32 *gtt = rdev->gart.ptr;
+ gtt[i] = cpu_to_le32(lower_32_bits(entry));
}
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 9acb1c3c005b..74bce91aecc1 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
radeon_gart_table_vram_free(rdev);
}
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
- void __iomem *ptr = (void *)rdev->gart.ptr;
-
addr = addr & 0xFFFFFFFFFFFFF000ULL;
addr |= R600_PTE_SYSTEM;
if (flags & RADEON_GART_PAGE_VALID)
@@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R600_PTE_WRITEABLE;
if (flags & RADEON_GART_PAGE_SNOOP)
addr |= R600_PTE_SNOOPED;
- writeq(addr, ptr + (i * 8));
+ return addr;
+}
+
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ void __iomem *ptr = (void *)rdev->gart.ptr;
+ writeq(entry, ptr + (i * 8));
}
int rs600_irq_set(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index aa7b872b2c43..83207929fc62 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 7b5d22110f25..6c6b655defcf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
--dev_priv->num_3d_resources;
} else if (unhide_svga) {
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) &
~SVGA_REG_ENABLE_HIDE);
- mutex_unlock(&dev_priv->hw_mutex);
}
mutex_unlock(&dev_priv->release_mutex);
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
mutex_lock(&dev_priv->release_mutex);
if (unlikely(--dev_priv->num_3d_resources == 0))
vmw_release_device(dev_priv);
- else if (hide_svga) {
- mutex_lock(&dev_priv->hw_mutex);
+ else if (hide_svga)
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) |
SVGA_REG_ENABLE_HIDE);
- mutex_unlock(&dev_priv->hw_mutex);
- }
n3d = (int32_t) dev_priv->num_3d_resources;
mutex_unlock(&dev_priv->release_mutex);
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
dev_priv->last_read_seqno = (uint32_t) -100;
- mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
+ spin_lock_init(&dev_priv->hw_lock);
+ spin_lock_init(&dev_priv->waiter_lock);
+ spin_lock_init(&dev_priv->cap_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->enable_fb = enable_fbdev;
- mutex_lock(&dev_priv->hw_mutex);
-
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
svga_id = vmw_read(dev_priv, SVGA_REG_ID);
if (svga_id != SVGA_ID_2) {
ret = -ENOSYS;
DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
- mutex_unlock(&dev_priv->hw_mutex);
goto out_err0;
}
@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->prim_bb_mem = dev_priv->vram_size;
ret = vmw_dma_masks(dev_priv);
- if (unlikely(ret != 0)) {
- mutex_unlock(&dev_priv->hw_mutex);
+ if (unlikely(ret != 0))
goto out_err0;
- }
/*
* Limit back buffer size to VRAM size. Remove this once
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv->prim_bb_mem > dev_priv->vram_size)
dev_priv->prim_bb_mem = dev_priv->vram_size;
- mutex_unlock(&dev_priv->hw_mutex);
-
vmw_print_capabilities(dev_priv->capabilities);
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,
if (unlikely(ret != 0))
return ret;
vmw_kms_save_vga(dev_priv);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 0);
- mutex_unlock(&dev_priv->hw_mutex);
}
if (active) {
@@ -1196,9 +1184,7 @@ out_no_active_lock:
if (!dev_priv->enable_fb) {
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- mutex_unlock(&dev_priv->hw_mutex);
}
return ret;
}
@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,
DRM_ERROR("Unable to clean VRAM on master drop.\n");
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- mutex_unlock(&dev_priv->hw_mutex);
}
dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
(void) vmw_read(dev_priv, SVGA_REG_ID);
- mutex_unlock(&dev_priv->hw_mutex);
/**
* Reclaim 3d reference held by fbdev and potentially
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4ee799b43d5d..d26a6daa9719 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -399,7 +399,8 @@ struct vmw_private {
uint32_t memory_size;
bool has_gmr;
bool has_mob;
- struct mutex hw_mutex;
+ spinlock_t hw_lock;
+ spinlock_t cap_lock;
/*
* VGA registers.
@@ -449,8 +450,9 @@ struct vmw_private {
atomic_t marker_seq;
wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue;
- int fence_queue_waiters; /* Protected by hw_mutex */
- int goal_queue_waiters; /* Protected by hw_mutex */
+ spinlock_t waiter_lock;
+ int fence_queue_waiters; /* Protected by waiter_lock */
+ int goal_queue_waiters; /* Protected by waiter_lock */
atomic_t fifo_queue_waiters;
uint32_t last_read_seqno;
spinlock_t irq_lock;
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
return (struct vmw_master *) master->driver_priv;
}
+/*
+ * The locking here is fine-grained: it is taken once for every
+ * register read and write. That is of course costly, but we don't
+ * perform much register access in the timing-critical paths anyway,
+ * and in return we can be sure that we never forget to take the
+ * hw lock around register accesses.
+ */
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+ spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
unsigned int offset)
{
- uint32_t val;
+ unsigned long irq_flags;
+ u32 val;
+ spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+ spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+
return val;
}
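The vmw_write()/vmw_read() accessors now take the spinlock around the index/value port pair themselves, so callers can no longer forget the lock. A userspace sketch of the same idea, modeling the I/O ports as plain variables and a pthread spinlock in place of spin_lock_irqsave(); interrupt masking has no userspace analogue:

#include <pthread.h>
#include <stdint.h>

static pthread_spinlock_t hw_lock;
static uint32_t index_port, value_port;      /* stand-ins for the I/O ports */
static uint32_t regs[256];                   /* fake register file */

static void hw_write(uint32_t offset, uint32_t value)
{
	pthread_spin_lock(&hw_lock);
	index_port = offset;                 /* outl(offset, INDEX_PORT) */
	value_port = value;                  /* outl(value, VALUE_PORT)  */
	regs[index_port] = value_port;
	pthread_spin_unlock(&hw_lock);
}

static uint32_t hw_read(uint32_t offset)
{
	uint32_t val;

	pthread_spin_lock(&hw_lock);
	index_port = offset;
	val = regs[index_port];              /* inl(VALUE_PORT) */
	pthread_spin_unlock(&hw_lock);
	return val;
}

int main(void)
{
	pthread_spin_init(&hw_lock, PTHREAD_PROCESS_PRIVATE);
	hw_write(7, 42);
	return hw_read(7) == 42 ? 0 : 1;
}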
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b7594cb758af..945f1e0dad92 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
- struct work_struct work, ping_work;
+ struct work_struct work;
u32 user_fence_size;
u32 fence_size;
u32 event_fence_action_size;
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
return "svga";
}
-static void vmw_fence_ping_func(struct work_struct *work)
-{
- struct vmw_fence_manager *fman =
- container_of(work, struct vmw_fence_manager, ping_work);
-
- vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
-}
-
static bool vmw_fence_enable_signaling(struct fence *f)
{
struct vmw_fence_obj *fence =
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;
- if (mutex_trylock(&dev_priv->hw_mutex)) {
- vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
- mutex_unlock(&dev_priv->hw_mutex);
- } else
- schedule_work(&fman->ping_work);
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
return true;
}
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->fence_list);
INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func);
- INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
fman->fifo_down = true;
fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
bool lists_empty;
(void) cancel_work_sync(&fman->work);
- (void) cancel_work_sync(&fman->ping_work);
spin_lock_irqsave(&fman->lock, irq_flags);
lists_empty = list_empty(&fman->fence_list) &&
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 09e10aefcd8e..39f2b03888e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (!dev_priv->has_mob)
return false;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
return (result != 0);
}
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
- mutex_lock(&dev_priv->hw_mutex);
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mb();
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
- mutex_unlock(&dev_priv->hw_mutex);
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
return vmw_fifo_send_fence(dev_priv, &dummy);
}
-void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ static DEFINE_SPINLOCK(ping_lock);
+ unsigned long irq_flags;
+ /*
+ * The ping_lock is needed because we don't have an atomic
+ * test-and-set of the SVGA_FIFO_BUSY register.
+ */
+ spin_lock_irqsave(&ping_lock, irq_flags);
if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}
-}
-
-void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
-{
- mutex_lock(&dev_priv->hw_mutex);
-
- vmw_fifo_ping_host_locked(dev_priv, reason);
-
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock_irqrestore(&ping_lock, irq_flags);
}
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
- mutex_lock(&dev_priv->hw_mutex);
-
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
;
@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vmw_write(dev_priv, SVGA_REG_TRACES,
dev_priv->traces_state);
- mutex_unlock(&dev_priv->hw_mutex);
vmw_marker_queue_takedown(&fifo->marker_queue);
if (likely(fifo->static_buffer != NULL)) {
@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
return vmw_fifo_wait_noirq(dev_priv, bytes,
interruptible, timeout);
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FIFO_PROGRESS,
@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
if (interruptible)
ret = wait_event_interruptible_timeout
@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
else if (likely(ret > 0))
ret = 0;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
return ret;
}
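vmw_fifo_ping_host() guards the read-then-set of SVGA_FIFO_BUSY with a function-local static spinlock, since there is no atomic test-and-set for that FIFO register. A toy version with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t ping_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t fifo_busy;                   /* stand-in for SVGA_FIFO_BUSY */
static unsigned int syncs_sent;

static void fifo_ping_host(void)
{
	/*
	 * The lock makes "read busy, then set busy" atomic, so only the
	 * first caller after the device went idle issues a sync.
	 */
	pthread_mutex_lock(&ping_lock);
	if (fifo_busy == 0) {
		fifo_busy = 1;
		syncs_sent++;                /* vmw_write(SVGA_REG_SYNC, ...) */
	}
	pthread_mutex_unlock(&ping_lock);
}

int main(void)
{
	fifo_ping_host();
	fifo_ping_host();                    /* no-op: device already pinged */
	return syncs_sent == 1 ? 0 : 1;
}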
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 37881ecf5d7a..69c8ce23123c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
for (i = 0; i < max_size; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
compat_cap->pairs[i][0] = i;
compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
return 0;
}
@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
if (num > SVGA3D_DEVCAP_MAX)
num = SVGA3D_DEVCAP_MAX;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
for (i = 0; i < num; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
} else if (gb_objects) {
ret = vmw_fill_compat_cap(dev_priv, bounce, size);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 0c423766c441..9fe9827ee499 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
- uint32_t busy;
- mutex_lock(&dev_priv->hw_mutex);
- busy = vmw_read(dev_priv, SVGA_REG_BUSY);
- mutex_unlock(&dev_priv->hw_mutex);
-
- return (busy == 0);
+ return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
void vmw_update_seqno(struct vmw_private *dev_priv,
@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (dev_priv->fence_queue_waiters++ == 0) {
unsigned long irq_flags;
@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (--dev_priv->fence_queue_waiters == 0) {
unsigned long irq_flags;
@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (dev_priv->goal_queue_waiters++ == 0) {
unsigned long irq_flags;
@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (--dev_priv->goal_queue_waiters == 0) {
unsigned long irq_flags;
@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
int vmw_wait_seqno(struct vmw_private *dev_priv,
@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
- mutex_unlock(&dev_priv->hw_mutex);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
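The waiter paths above all follow the same counted-enable pattern under waiter_lock: the first waiter unmasks the interrupt, the last one masks it again. A minimal sketch, with irq_enabled standing in for the SVGA_REG_IRQMASK writes:

#include <pthread.h>

static pthread_mutex_t waiter_lock = PTHREAD_MUTEX_INITIALIZER;
static int fence_queue_waiters;
static int irq_enabled;

static void seqno_waiter_add(void)
{
	pthread_mutex_lock(&waiter_lock);
	if (fence_queue_waiters++ == 0)
		irq_enabled = 1;             /* first waiter unmasks the IRQ */
	pthread_mutex_unlock(&waiter_lock);
}

static void seqno_waiter_remove(void)
{
	pthread_mutex_lock(&waiter_lock);
	if (--fence_queue_waiters == 0)
		irq_enabled = 0;             /* last waiter masks it again */
	pthread_mutex_unlock(&waiter_lock);
}

int main(void)
{
	seqno_waiter_add();
	seqno_waiter_add();
	seqno_waiter_remove();
	seqno_waiter_remove();
	return (fence_queue_waiters == 0 && !irq_enabled) ? 0 : 1;
}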
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3725b521d931..8725b79e7847 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_display_unit *du = vmw_connector_to_du(connector);
- mutex_lock(&dev_priv->hw_mutex);
num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
- mutex_unlock(&dev_priv->hw_mutex);
return ((vmw_connector_to_du(connector)->unit < num_displays &&
du->pref_active) ?
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 31e8308ba899..ab838d9e28b6 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -881,6 +881,7 @@ config I2C_XLR
config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
depends on ARCH_SHMOBILE || COMPILE_TEST
+ select I2C_SLAVE
help
If you say yes to this option, support will be included for the
R-Car I2C controller.
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index bff20a589621..958c8db4ec30 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -785,14 +785,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
int ret;
pm_runtime_get_sync(&adap->dev);
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
for (retry = 0; retry < adap->retries; retry++) {
ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
if (ret != -EAGAIN) {
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
pm_runtime_put(&adap->dev);
return ret;
}
@@ -802,7 +804,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
udelay(100);
}
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
pm_runtime_put(&adap->dev);
return -EREMOTEIO;
}
@@ -1197,7 +1199,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
clk_prepare_enable(i2c->clk);
ret = s3c24xx_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
if (ret != 0) {
dev_err(&pdev->dev, "I2C controller init failed\n");
return ret;
@@ -1210,6 +1212,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1218,6 +1221,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
+ clk_unprepare(i2c->clk);
return ret;
}
}
@@ -1225,6 +1229,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
ret = s3c24xx_i2c_register_cpufreq(i2c);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1241,6 +1246,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "failed to add bus to i2c core\n");
s3c24xx_i2c_deregister_cpufreq(i2c);
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1262,6 +1268,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
{
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+ clk_unprepare(i2c->clk);
+
pm_runtime_disable(&i2c->adap.dev);
pm_runtime_disable(&pdev->dev);
@@ -1293,13 +1301,16 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+ int ret;
if (!IS_ERR(i2c->sysreg))
regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg);
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
s3c24xx_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
i2c->suspended = 0;
return 0;
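The s3c24xx changes keep the sleepable clk_prepare()/clk_unprepare() at probe/remove time and leave only the cheap clk_enable()/clk_disable() in the transfer and resume paths. A toy model of that split in the clk API semantics; the struct and return values are simplified, not the real clk framework:

/* Toy model of the clk API split: prepare may sleep, enable may not. */
struct clk { int prepared; int enabled; };

static int clk_prepare(struct clk *c)    { c->prepared = 1; return 0; }
static void clk_unprepare(struct clk *c) { c->prepared = 0; }

static int clk_enable(struct clk *c)
{
	if (!c->prepared)
		return -1;                   /* must be prepared first */
	c->enabled = 1;
	return 0;
}

static void clk_disable(struct clk *c) { c->enabled = 0; }

int main(void)
{
	struct clk i2c_clk = { 0, 0 };

	clk_prepare(&i2c_clk);               /* once, at probe time */

	for (int xfer = 0; xfer < 3; xfer++) {
		if (clk_enable(&i2c_clk))    /* cheap, per transfer */
			return 1;
		/* ... do the I2C transfer ... */
		clk_disable(&i2c_clk);
	}

	clk_unprepare(&i2c_clk);             /* once, at remove time */
	return 0;
}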
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 440d5dbc8b5f..007818b3e174 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -139,6 +139,7 @@ struct sh_mobile_i2c_data {
int pos;
int sr;
bool send_stop;
+ bool stop_after_dma;
struct resource *res;
struct dma_chan *dma_tx;
@@ -407,7 +408,7 @@ static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd)
if (pd->pos == pd->msg->len) {
/* Send stop if we haven't yet (DMA case) */
- if (pd->send_stop && (iic_rd(pd, ICCR) & ICCR_BBSY))
+ if (pd->send_stop && pd->stop_after_dma)
i2c_op(pd, OP_TX_STOP, 0);
return 1;
}
@@ -449,6 +450,13 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd)
real_pos = pd->pos - 2;
if (pd->pos == pd->msg->len) {
+ if (pd->stop_after_dma) {
+ /* Simulate PIO end condition after DMA transfer */
+ i2c_op(pd, OP_RX_STOP, 0);
+ pd->pos++;
+ break;
+ }
+
if (real_pos < 0) {
i2c_op(pd, OP_RX_STOP, 0);
break;
@@ -536,6 +544,7 @@ static void sh_mobile_i2c_dma_callback(void *data)
sh_mobile_i2c_dma_unmap(pd);
pd->pos = pd->msg->len;
+ pd->stop_after_dma = true;
iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
}
@@ -726,6 +735,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
bool do_start = pd->send_stop || !i;
msg = &msgs[i];
pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
+ pd->stop_after_dma = false;
err = start_ch(pd, msg, do_start);
if (err)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 39d25a8cb1ad..e9eae57a2b50 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -2972,6 +2972,7 @@ trace:
}
EXPORT_SYMBOL(i2c_smbus_xfer);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
{
int ret;
@@ -3019,6 +3020,7 @@ int i2c_slave_unregister(struct i2c_client *client)
return ret;
}
EXPORT_SYMBOL_GPL(i2c_slave_unregister);
+#endif
MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
MODULE_DESCRIPTION("I2C-Bus main module");
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 6631400b5f02..cf9b09db092f 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -74,7 +74,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count >= attr->size)
+ if (off + count > attr->size)
return -EFBIG;
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -92,7 +92,7 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count >= attr->size)
+ if (off + count > attr->size)
return -EFBIG;
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
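The eeprom bounds fix above is a classic off-by-one: with a 64-byte area, reading the last byte gives off + count == 64, which the old ">=" comparison wrongly rejected while the new ">" accepts it and still rejects anything past the end. A worked check of both cases; range_ok is a hypothetical helper, not a kernel function:

#include <stdio.h>
#include <stddef.h>

/* Returns 1 if [off, off + count) fits inside a buffer of 'size' bytes. */
static int range_ok(size_t off, size_t count, size_t size)
{
	return off + count <= size;          /* i.e. !(off + count > size) */
}

int main(void)
{
	size_t size = 64;

	/* last byte: off 63, count 1, off + count == 64, now allowed */
	printf("last byte: %d\n", range_ok(63, 1, size));   /* prints 1 */
	/* one past the end: off 63, count 2, still rejected */
	printf("overflow:  %d\n", range_ok(63, 2, size));   /* prints 0 */
	return 0;
}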
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e6c23b9eab33..5db1a8cc388d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -123,7 +123,6 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
struct ib_udata *uhw) = {
[IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
[IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow,
- [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device
};
static void ib_uverbs_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 8ba80a6d3a46..d7562beb5423 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -98,15 +98,9 @@ enum {
IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */
IPOIB_MCAST_FLAG_SENDONLY = 1,
- /*
- * For IPOIB_MCAST_FLAG_BUSY
- * When set, in flight join and mcast->mc is unreliable
- * When clear and mcast->mc IS_ERR_OR_NULL, need to restart or
- * haven't started yet
- * When clear and mcast->mc is valid pointer, join was successful
- */
- IPOIB_MCAST_FLAG_BUSY = 2,
+ IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
IPOIB_MCAST_FLAG_ATTACHED = 3,
+ IPOIB_MCAST_JOIN_STARTED = 4,
MAX_SEND_CQE = 16,
IPOIB_CM_COPYBREAK = 256,
@@ -323,7 +317,6 @@ struct ipoib_dev_priv {
struct list_head multicast_list;
struct rb_root multicast_tree;
- struct workqueue_struct *wq;
struct delayed_work mcast_task;
struct work_struct carrier_on_task;
struct work_struct flush_light;
@@ -484,10 +477,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
void ipoib_pkey_event(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
-int ipoib_ib_dev_open(struct net_device *dev);
+int ipoib_ib_dev_open(struct net_device *dev, int flush);
int ipoib_ib_dev_up(struct net_device *dev);
-int ipoib_ib_dev_down(struct net_device *dev);
-int ipoib_ib_dev_stop(struct net_device *dev);
+int ipoib_ib_dev_down(struct net_device *dev, int flush);
+int ipoib_ib_dev_stop(struct net_device *dev, int flush);
void ipoib_pkey_dev_check_presence(struct net_device *dev);
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
@@ -499,7 +492,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev);
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 56959adb6c7d..933efcea0d03 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
}
spin_lock_irq(&priv->lock);
- queue_delayed_work(priv->wq,
+ queue_delayed_work(ipoib_workqueue,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
/* Add this entry to passive ids list head, but do not re-add it
* if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
spin_lock_irqsave(&priv->lock, flags);
list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
ipoib_cm_start_rx_drain(priv);
- queue_work(priv->wq, &priv->cm.rx_reap_task);
+ queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
spin_unlock_irqrestore(&priv->lock, flags);
} else
ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
spin_lock_irqsave(&priv->lock, flags);
list_move(&p->list, &priv->cm.rx_reap_list);
spin_unlock_irqrestore(&priv->lock, flags);
- queue_work(priv->wq, &priv->cm.rx_reap_task);
+ queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
}
return;
}
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(priv->wq, &priv->cm.reap_task);
+ queue_work(ipoib_workqueue, &priv->cm.reap_task);
}
clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(priv->wq, &priv->cm.reap_task);
+ queue_work(ipoib_workqueue, &priv->cm.reap_task);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
tx->dev = dev;
list_add(&tx->list, &priv->cm.start_list);
set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
- queue_work(priv->wq, &priv->cm.start_task);
+ queue_work(ipoib_workqueue, &priv->cm.start_task);
return tx;
}
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
spin_lock_irqsave(&priv->lock, flags);
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(priv->wq, &priv->cm.reap_task);
+ queue_work(ipoib_workqueue, &priv->cm.reap_task);
ipoib_dbg(priv, "Reap connection for gid %pI6\n",
tx->neigh->daddr + 4);
tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
- queue_work(priv->wq, &priv->cm.skb_task);
+ queue_work(ipoib_workqueue, &priv->cm.skb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
}
if (!list_empty(&priv->cm.passive_ids))
- queue_delayed_work(priv->wq,
+ queue_delayed_work(ipoib_workqueue,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
spin_unlock_irq(&priv->lock);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index fe65abb5150c..72626c348174 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work)
__ipoib_reap_ah(dev);
if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
- queue_delayed_work(priv->wq, &priv->ah_reap_task,
+ queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
round_jiffies_relative(HZ));
}
@@ -664,7 +664,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx)
drain_tx_cq((struct net_device *)ctx);
}
-int ipoib_ib_dev_open(struct net_device *dev)
+int ipoib_ib_dev_open(struct net_device *dev, int flush)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret;
@@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
}
clear_bit(IPOIB_STOP_REAPER, &priv->flags);
- queue_delayed_work(priv->wq, &priv->ah_reap_task,
+ queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
round_jiffies_relative(HZ));
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -706,7 +706,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
dev_stop:
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
napi_enable(&priv->napi);
- ipoib_ib_dev_stop(dev);
+ ipoib_ib_dev_stop(dev, flush);
return -1;
}
@@ -738,7 +738,7 @@ int ipoib_ib_dev_up(struct net_device *dev)
return ipoib_mcast_start_thread(dev);
}
-int ipoib_ib_dev_down(struct net_device *dev)
+int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
netif_carrier_off(dev);
- ipoib_mcast_stop_thread(dev);
+ ipoib_mcast_stop_thread(dev, flush);
ipoib_mcast_dev_flush(dev);
ipoib_flush_paths(dev);
@@ -807,7 +807,7 @@ void ipoib_drain_cq(struct net_device *dev)
local_bh_enable();
}
-int ipoib_ib_dev_stop(struct net_device *dev)
+int ipoib_ib_dev_stop(struct net_device *dev, int flush)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_attr qp_attr;
@@ -880,7 +880,8 @@ timeout:
/* Wait for all AHs to be reaped */
set_bit(IPOIB_STOP_REAPER, &priv->flags);
cancel_delayed_work(&priv->ah_reap_task);
- flush_workqueue(priv->wq);
+ if (flush)
+ flush_workqueue(ipoib_workqueue);
begin = jiffies;
@@ -917,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
(unsigned long) dev);
if (dev->flags & IFF_UP) {
- if (ipoib_ib_dev_open(dev)) {
+ if (ipoib_ib_dev_open(dev, 1)) {
ipoib_transport_dev_cleanup(dev);
return -ENODEV;
}
@@ -1039,12 +1040,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
}
if (level >= IPOIB_FLUSH_NORMAL)
- ipoib_ib_dev_down(dev);
+ ipoib_ib_dev_down(dev, 0);
if (level == IPOIB_FLUSH_HEAVY) {
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- ipoib_ib_dev_stop(dev);
- if (ipoib_ib_dev_open(dev) != 0)
+ ipoib_ib_dev_stop(dev, 0);
+ if (ipoib_ib_dev_open(dev, 0) != 0)
return;
if (netif_queue_stopped(dev))
netif_start_queue(dev);
@@ -1096,7 +1097,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
*/
ipoib_flush_paths(dev);
- ipoib_mcast_stop_thread(dev);
+ ipoib_mcast_stop_thread(dev, 1);
ipoib_mcast_dev_flush(dev);
ipoib_transport_dev_cleanup(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6bad17d4d588..58b5aa3b6f2d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev)
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- if (ipoib_ib_dev_open(dev)) {
+ if (ipoib_ib_dev_open(dev, 1)) {
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
goto err_disable;
@@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev)
return 0;
err_stop:
- ipoib_ib_dev_stop(dev);
+ ipoib_ib_dev_stop(dev, 1);
err_disable:
clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
@@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev)
netif_stop_queue(dev);
- ipoib_ib_dev_down(dev);
- ipoib_ib_dev_stop(dev);
+ ipoib_ib_dev_down(dev, 1);
+ ipoib_ib_dev_stop(dev, 0);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
struct ipoib_dev_priv *cpriv;
@@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
return;
}
- queue_work(priv->wq, &priv->restart_task);
+ queue_work(ipoib_workqueue, &priv->restart_task);
}
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
__ipoib_reap_neigh(priv);
if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+ queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
arp_tbl.gc_interval);
}
@@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
/* start garbage collection */
clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+ queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
arp_tbl.gc_interval);
return 0;
@@ -1262,13 +1262,15 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
+ if (ipoib_neigh_hash_init(priv) < 0)
+ goto out;
/* Allocate RX/TX "rings" to hold queued skbs */
priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
GFP_KERNEL);
if (!priv->rx_ring) {
printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
ca->name, ipoib_recvq_size);
- goto out;
+ goto out_neigh_hash_cleanup;
}
priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -1283,24 +1285,16 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
if (ipoib_ib_dev_init(dev, ca, port))
goto out_tx_ring_cleanup;
- /*
- * Must be after ipoib_ib_dev_init so we can allocate a per
- * device wq there and use it here
- */
- if (ipoib_neigh_hash_init(priv) < 0)
- goto out_dev_uninit;
-
return 0;
-out_dev_uninit:
- ipoib_ib_dev_cleanup(dev);
-
out_tx_ring_cleanup:
vfree(priv->tx_ring);
out_rx_ring_cleanup:
kfree(priv->rx_ring);
+out_neigh_hash_cleanup:
+ ipoib_neigh_hash_uninit(dev);
out:
return -ENOMEM;
}
@@ -1323,12 +1317,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
}
unregister_netdevice_many(&head);
- /*
- * Must be before ipoib_ib_dev_cleanup or we delete an in use
- * work queue
- */
- ipoib_neigh_hash_uninit(dev);
-
ipoib_ib_dev_cleanup(dev);
kfree(priv->rx_ring);
@@ -1336,6 +1324,8 @@ void ipoib_dev_cleanup(struct net_device *dev)
priv->rx_ring = NULL;
priv->tx_ring = NULL;
+
+ ipoib_neigh_hash_uninit(dev);
}
static const struct header_ops ipoib_header_ops = {
@@ -1646,7 +1636,7 @@ register_failed:
/* Stop GC if started before flush */
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(priv->wq);
+ flush_workqueue(ipoib_workqueue);
event_failed:
ipoib_dev_cleanup(priv->dev);
@@ -1717,7 +1707,7 @@ static void ipoib_remove_one(struct ib_device *device)
/* Stop GC */
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(priv->wq);
+ flush_workqueue(ipoib_workqueue);
unregister_netdev(priv->dev);
free_netdev(priv->dev);
@@ -1758,13 +1748,8 @@ static int __init ipoib_init_module(void)
* unregister_netdev() and linkwatch_event take the rtnl lock,
* so flush_scheduled_work() can deadlock during device
* removal.
- *
- * In addition, bringing one device up and another down at the
- * same time can deadlock a single workqueue, so we have this
- * global fallback workqueue, but we also attempt to open a
- * per device workqueue each time we bring an interface up
*/
- ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
+ ipoib_workqueue = create_singlethread_workqueue("ipoib");
if (!ipoib_workqueue) {
ret = -ENOMEM;
goto err_fs;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index bc50dd0d0e4d..ffb83b5f7e80 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -190,6 +190,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
spin_unlock_irq(&priv->lock);
priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
set_qkey = 1;
+
+ if (!ipoib_cm_admin_enabled(dev)) {
+ rtnl_lock();
+ dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
+ rtnl_unlock();
+ }
}
if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -271,27 +277,16 @@ ipoib_mcast_sendonly_join_complete(int status,
struct ipoib_mcast *mcast = multicast->context;
struct net_device *dev = mcast->dev;
- /*
- * We have to take the mutex to force mcast_sendonly_join to
- * return from ib_sa_multicast_join and set mcast->mc to a
- * valid value. Otherwise we were racing with ourselves in
- * that we might fail here, but get a valid return from
- * ib_sa_multicast_join after we had cleared mcast->mc here,
- * resulting in mis-matched joins and leaves and a deadlock
- */
- mutex_lock(&mcast_mutex);
-
/* We trap for port events ourselves. */
if (status == -ENETRESET)
- goto out;
+ return 0;
if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
if (status) {
if (mcast->logcount++ < 20)
- ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast "
- "join failed for %pI6, status %d\n",
+ ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
mcast->mcmember.mgid.raw, status);
/* Flush out any queued packets */
@@ -301,15 +296,11 @@ ipoib_mcast_sendonly_join_complete(int status,
dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
}
netif_tx_unlock_bh(dev);
+
+ /* Clear the busy flag so we try again */
+ status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
+ &mcast->flags);
}
-out:
- clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- if (status)
- mcast->mc = NULL;
- complete(&mcast->done);
- if (status == -ENETRESET)
- status = 0;
- mutex_unlock(&mcast_mutex);
return status;
}
@@ -327,14 +318,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
int ret = 0;
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
- ipoib_dbg_mcast(priv, "device shutting down, no sendonly "
- "multicast joins\n");
+ ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
return -ENODEV;
}
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
- ipoib_dbg_mcast(priv, "multicast entry busy, skipping "
- "sendonly join\n");
+ if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
+ ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
return -EBUSY;
}
@@ -342,9 +331,6 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
rec.port_gid = priv->local_gid;
rec.pkey = cpu_to_be16(priv->pkey);
- mutex_lock(&mcast_mutex);
- init_completion(&mcast->done);
- set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
priv->port, &rec,
IB_SA_MCMEMBER_REC_MGID |
@@ -357,14 +343,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
if (IS_ERR(mcast->mc)) {
ret = PTR_ERR(mcast->mc);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- complete(&mcast->done);
- ipoib_warn(priv, "ib_sa_join_multicast for sendonly join "
- "failed (ret = %d)\n", ret);
+ ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
+ ret);
} else {
- ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting "
- "sendonly join\n", mcast->mcmember.mgid.raw);
+ ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
+ mcast->mcmember.mgid.raw);
}
- mutex_unlock(&mcast_mutex);
return ret;
}
@@ -375,29 +359,18 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
carrier_on_task);
struct ib_port_attr attr;
+ /*
+ * Take rtnl_lock to avoid racing with ipoib_stop() and
+ * turning the carrier back on while a device is being
+ * removed.
+ */
if (ib_query_port(priv->ca, priv->port, &attr) ||
attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
return;
}
- /*
- * Take rtnl_lock to avoid racing with ipoib_stop() and
- * turning the carrier back on while a device is being
- * removed. However, ipoib_stop() will attempt to flush
- * the workqueue while holding the rtnl lock, so loop
- * on trylock until either we get the lock or we see
- * FLAG_ADMIN_UP go away as that signals that we are bailing
- * and can safely ignore the carrier on work.
- */
- while (!rtnl_trylock()) {
- if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
- return;
- else
- msleep(20);
- }
- if (!ipoib_cm_admin_enabled(priv->dev))
- dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
+ rtnl_lock();
netif_carrier_on(priv->dev);
rtnl_unlock();
}
@@ -412,63 +385,60 @@ static int ipoib_mcast_join_complete(int status,
ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
mcast->mcmember.mgid.raw, status);
- /*
- * We have to take the mutex to force mcast_join to
- * return from ib_sa_multicast_join and set mcast->mc to a
- * valid value. Otherwise we were racing with ourselves in
- * that we might fail here, but get a valid return from
- * ib_sa_multicast_join after we had cleared mcast->mc here,
- * resulting in mis-matched joins and leaves and a deadlock
- */
- mutex_lock(&mcast_mutex);
-
/* We trap for port events ourselves. */
- if (status == -ENETRESET)
+ if (status == -ENETRESET) {
+ status = 0;
goto out;
+ }
if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
if (!status) {
mcast->backoff = 1;
+ mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+ queue_delayed_work(ipoib_workqueue,
+ &priv->mcast_task, 0);
+ mutex_unlock(&mcast_mutex);
/*
- * Defer carrier on work to priv->wq to avoid a
+ * Defer carrier on work to ipoib_workqueue to avoid a
* deadlock on rtnl_lock here.
*/
if (mcast == priv->broadcast)
- queue_work(priv->wq, &priv->carrier_on_task);
- } else {
- if (mcast->logcount++ < 20) {
- if (status == -ETIMEDOUT || status == -EAGAIN) {
- ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
- } else {
- ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
- }
- }
+ queue_work(ipoib_workqueue, &priv->carrier_on_task);
- mcast->backoff *= 2;
- if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
- mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+ status = 0;
+ goto out;
}
-out:
+
+ if (mcast->logcount++ < 20) {
+ if (status == -ETIMEDOUT || status == -EAGAIN) {
+ ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
+ mcast->mcmember.mgid.raw, status);
+ } else {
+ ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
+ mcast->mcmember.mgid.raw, status);
+ }
+ }
+
+ mcast->backoff *= 2;
+ if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
+ mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+
+ /* Clear the busy flag so we try again */
+ status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+
+ mutex_lock(&mcast_mutex);
spin_lock_irq(&priv->lock);
- clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- if (status)
- mcast->mc = NULL;
- complete(&mcast->done);
- if (status == -ENETRESET)
- status = 0;
- if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(priv->wq, &priv->mcast_task,
+ if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
+ queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
mcast->backoff * HZ);
spin_unlock_irq(&priv->lock);
mutex_unlock(&mcast_mutex);
-
+out:
+ complete(&mcast->done);
return status;
}
@@ -517,9 +487,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
}
- mutex_lock(&mcast_mutex);
- init_completion(&mcast->done);
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
+
mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
@@ -533,11 +504,13 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+ mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(priv->wq, &priv->mcast_task,
+ queue_delayed_work(ipoib_workqueue,
+ &priv->mcast_task,
mcast->backoff * HZ);
+ mutex_unlock(&mcast_mutex);
}
- mutex_unlock(&mcast_mutex);
}
void ipoib_mcast_join_task(struct work_struct *work)
@@ -574,8 +547,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
ipoib_warn(priv, "failed to allocate broadcast group\n");
mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(priv->wq, &priv->mcast_task,
- HZ);
+ queue_delayed_work(ipoib_workqueue,
+ &priv->mcast_task, HZ);
mutex_unlock(&mcast_mutex);
return;
}
@@ -590,8 +563,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
}
if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
- if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
- !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
+ if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
ipoib_mcast_join(dev, priv->broadcast, 0);
return;
}
@@ -599,33 +571,23 @@ void ipoib_mcast_join_task(struct work_struct *work)
while (1) {
struct ipoib_mcast *mcast = NULL;
- /*
- * Need the mutex so our flags are consistent, need the
- * priv->lock so we don't race with list removals in either
- * mcast_dev_flush or mcast_restart_task
- */
- mutex_lock(&mcast_mutex);
spin_lock_irq(&priv->lock);
list_for_each_entry(mcast, &priv->multicast_list, list) {
- if (IS_ERR_OR_NULL(mcast->mc) &&
- !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
- !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
+ if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
+ && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
+ && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
/* Found the next unjoined group */
break;
}
}
spin_unlock_irq(&priv->lock);
- mutex_unlock(&mcast_mutex);
if (&mcast->list == &priv->multicast_list) {
/* All done */
break;
}
- if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- ipoib_mcast_sendonly_join(mcast);
- else
- ipoib_mcast_join(dev, mcast, 1);
+ ipoib_mcast_join(dev, mcast, 1);
return;
}
@@ -642,13 +604,13 @@ int ipoib_mcast_start_thread(struct net_device *dev)
mutex_lock(&mcast_mutex);
if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+ queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
return 0;
}
-int ipoib_mcast_stop_thread(struct net_device *dev)
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -659,7 +621,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
cancel_delayed_work(&priv->mcast_task);
mutex_unlock(&mcast_mutex);
- flush_workqueue(priv->wq);
+ if (flush)
+ flush_workqueue(ipoib_workqueue);
return 0;
}
@@ -670,9 +633,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
int ret = 0;
if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
-
- if (!IS_ERR_OR_NULL(mcast->mc))
ib_sa_free_multicast(mcast->mc);
if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
@@ -725,8 +685,6 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
__ipoib_mcast_add(dev, mcast);
list_add_tail(&mcast->list, &priv->multicast_list);
- if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(priv->wq, &priv->mcast_task, 0);
}
if (!mcast->ah) {
@@ -740,6 +698,8 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
ipoib_dbg_mcast(priv, "no address vector, "
"but multicast join already started\n");
+ else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ ipoib_mcast_sendonly_join(mcast);
/*
* If lookup completes between here and out:, don't
@@ -799,12 +759,9 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /*
- * make sure the in-flight joins have finished before we attempt
- * to leave
- */
+ /* separate the wait from the leave */
list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
wait_for_completion(&mcast->done);
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
@@ -837,6 +794,8 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "restarting multicast task\n");
+ ipoib_mcast_stop_thread(dev, 0);
+
local_irq_save(flags);
netif_addr_lock(dev);
spin_lock(&priv->lock);
@@ -921,38 +880,14 @@ void ipoib_mcast_restart_task(struct work_struct *work)
netif_addr_unlock(dev);
local_irq_restore(flags);
- /*
- * make sure the in-flight joins have finished before we attempt
- * to leave
- */
- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- wait_for_completion(&mcast->done);
-
- /*
- * We have to cancel outside of the spinlock, but we have to
- * take the rtnl lock or else we race with the removal of
- * entries from the remove list in mcast_dev_flush as part
- * of ipoib_stop(). We detect the drop of the ADMIN_UP flag
- * to signal that we have hit this particular race, and we
- * return since we know we don't need to do anything else
- * anyway.
- */
- while (!rtnl_trylock()) {
- if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
- return;
- else
- msleep(20);
- }
+ /* We have to cancel outside of the spinlock */
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
ipoib_mcast_leave(mcast->dev, mcast);
ipoib_mcast_free(mcast);
}
- /*
- * Restart our join task if needed
- */
- ipoib_mcast_start_thread(dev);
- rtnl_unlock();
+
+ if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ ipoib_mcast_start_thread(dev);
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
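The failure path in ipoib_mcast_join_complete() above doubles mcast->backoff on every failed join and clamps it at IPOIB_MAX_BACKOFF_SECONDS before the task is requeued. A minimal standalone sketch of that capped exponential backoff; the constants and the try_join() stub are invented for illustration, not taken from the driver:

#include <stdio.h>
#include <stdbool.h>

#define MAX_BACKOFF_SECONDS 16          /* stands in for IPOIB_MAX_BACKOFF_SECONDS */

/* Hypothetical stub: pretend the join succeeds on the 5th attempt. */
static bool try_join(int attempt)
{
        return attempt >= 5;
}

int main(void)
{
        unsigned int backoff = 1;       /* mirrors mcast->backoff, reset to 1 on success */
        int attempt = 0;

        while (!try_join(++attempt)) {
                printf("join failed, retrying in %u s\n", backoff);
                backoff *= 2;           /* double on every failure ... */
                if (backoff > MAX_BACKOFF_SECONDS)
                        backoff = MAX_BACKOFF_SECONDS;  /* ... but cap the delay */
        }
        printf("joined after %d attempts\n", attempt);
        return 0;
}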
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b72a753eb41d..c56d5d44c53b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -145,20 +145,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
int ret, size;
int i;
- /*
- * the various IPoIB tasks assume they will never race against
- * themselves, so always use a single thread workqueue
- */
- priv->wq = create_singlethread_workqueue("ipoib_wq");
- if (!priv->wq) {
- printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
- return -ENODEV;
- }
-
priv->pd = ib_alloc_pd(priv->ca);
if (IS_ERR(priv->pd)) {
printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
- goto out_free_wq;
+ return -ENODEV;
}
priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
@@ -252,10 +242,6 @@ out_free_mr:
out_free_pd:
ib_dealloc_pd(priv->pd);
-
-out_free_wq:
- destroy_workqueue(priv->wq);
- priv->wq = NULL;
return -ENODEV;
}
@@ -284,12 +270,6 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
if (ib_dealloc_pd(priv->pd))
ipoib_warn(priv, "ib_dealloc_pd failed\n");
-
- if (priv->wq) {
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
- priv->wq = NULL;
- }
}
void ipoib_event(struct ib_event_handler *handler,
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 77ecf6d32237..6e22682c8255 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1097,6 +1097,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus UX31 0x361f00 20, 15, 0e clickpad
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
* Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
* Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
@@ -1475,6 +1477,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
},
},
+ {
+ /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+ },
+ },
+ {
+ /* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
+ },
+ },
#endif
{ }
};
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index f9472920d986..23e26e0768b5 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -135,8 +135,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
1232, 5710, 1156, 4696
},
{
- (const char * const []){"LEN0034", "LEN0036", "LEN0039",
- "LEN2002", "LEN2004", NULL},
+ (const char * const []){"LEN0034", "LEN0036", "LEN0037",
+ "LEN0039", "LEN2002", "LEN2004",
+ NULL},
1024, 5112, 2024, 4832
},
{
@@ -165,7 +166,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
"LEN0035", /* X240 */
"LEN0036", /* T440 */
- "LEN0037",
+ "LEN0037", /* X1 Carbon 2nd */
"LEN0038",
"LEN0039", /* T440s */
"LEN0041",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 764857b4e268..c11556563ef0 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
},
},
{
+ /* Medion Akoya E7225 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+ },
+ },
+ {
/* Blue FB5601 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "blue"),
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index f722a0c466cf..c48da057dbb1 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = {
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
.map = gart_iommu_map,
+ .map_sg = default_iommu_map_sg,
.unmap = gart_iommu_unmap,
.iova_to_phys = gart_iommu_iova_to_phys,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
@@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
do_gart_setup(gart, NULL);
gart_handle = gart;
- bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
+
return 0;
}
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 0b380603a578..d7c286656a25 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -1474,7 +1474,7 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
add_ai(plci, &parms[5]);
sig_req(plci, REJECT, 0);
}
- else if (Reject == 1 || Reject > 9)
+ else if (Reject == 1 || Reject >= 9)
{
add_ai(plci, &parms[5]);
sig_req(plci, HANGUP, 0);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index da3604e73e8a..1695ee5f3ffc 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -72,6 +72,19 @@ __acquires(bitmap->lock)
/* this page has not been allocated yet */
spin_unlock_irq(&bitmap->lock);
+ /* It is possible that this is being called inside a
+  * prepare_to_wait/finish_wait loop from raid5.c:make_request().
+  * In general it is not permitted to sleep in that context as it
+  * can cause the loop to spin freely.
+  * That doesn't apply here as we can only reach this point
+  * once within any such loop.
+  * When this function completes, either bp[page].map or
+  * bp[page].hijacked will be set. In either case, this function
+  * will abort before getting to this point again, so there is
+  * no risk of a free-spin, and it is safe to assert that
+  * sleeping here is allowed.
+  */
+ sched_annotate_sleep();
mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
spin_lock_irq(&bitmap->lock);
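The comment above refers to the canonical kernel wait loop, where any extra sleep between prepare_to_wait() and schedule() can consume the wakeup and leave the loop spinning. A compilable toy of that loop shape, with the kernel primitives replaced by logging stubs (the stubs are assumptions for illustration; the real functions manage task state):

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-ins for the primitives the bitmap.c comment refers to;
 * the real prepare_to_wait()/schedule() manage task state, these just log. */
static void prepare_to_wait(void) { puts("prepare_to_wait: mark task as sleeping"); }
static void finish_wait(void)     { puts("finish_wait: back to running"); }
static void schedule(void)        { puts("schedule: actually sleep"); }

static int events;

static bool condition_met(void)
{
        return ++events >= 3;   /* pretend the wakeup condition arrives later */
}

int main(void)
{
        /* The canonical wait loop: sleeping anywhere else inside the loop
         * can swallow the wakeup and make the loop spin, which is why the
         * allocation in bitmap_checkpage() must only ever happen once. */
        for (;;) {
                prepare_to_wait();
                if (condition_met())
                        break;
                schedule();
        }
        finish_wait();
        return 0;
}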
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 21b156242e42..c1c010498a21 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -683,7 +683,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
DMERR("could not allocate metadata struct");
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
atomic_set(&cmd->ref_count, 1);
@@ -745,7 +745,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
return cmd;
cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
- if (cmd) {
+ if (!IS_ERR(cmd)) {
mutex_lock(&table_lock);
cmd2 = lookup(bdev);
if (cmd2) {
@@ -780,9 +780,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
{
struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
may_format_device, policy_hint_size);
- if (cmd && !same_params(cmd, data_block_size)) {
+
+ if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
dm_cache_metadata_close(cmd);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
return cmd;
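The dm-cache change above switches metadata_open() from returning NULL to returning ERR_PTR(-ENOMEM) or ERR_PTR(-EINVAL), so callers learn why the open failed rather than just that it failed. A self-contained sketch of the ERR_PTR/IS_ERR/PTR_ERR convention, with minimal userspace re-implementations of the <linux/err.h> helpers (struct metadata and the fail parameter are invented for the example):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Minimal userspace re-implementations of the <linux/err.h> helpers. */
static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct metadata { int id; };

/* Error-aware constructor: never returns NULL, always a valid pointer
 * or an ERR_PTR carrying the reason for the failure. */
static struct metadata *metadata_open(int fail)
{
        struct metadata *md;

        if (fail)
                return ERR_PTR(-ENOMEM);   /* caller can tell ENOMEM from EINVAL */

        md = malloc(sizeof(*md));
        if (!md)
                return ERR_PTR(-ENOMEM);
        md->id = 42;
        return md;
}

int main(void)
{
        struct metadata *md = metadata_open(1);

        if (IS_ERR(md)) {
                printf("open failed: %ld\n", PTR_ERR(md));
                return 1;
        }
        printf("opened metadata %d\n", md->id);
        free(md);
        return 0;
}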
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 493478989dbd..07705ee181e3 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3385,6 +3385,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
+ if (get_pool_mode(pool) >= PM_READ_ONLY) {
+ DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
+ dm_device_name(pool->pool_md));
+ return -EINVAL;
+ }
+
if (!strcasecmp(argv[0], "create_thin"))
r = process_create_thin_mesg(argc, argv, pool);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c1b0d52bfcb0..b98765f6f77f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3195,6 +3195,11 @@ static void handle_stripe_dirtying(struct r5conf *conf,
(unsigned long long)sh->sector,
rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
}
+
+ if (rcw > disks && rmw > disks &&
+ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ set_bit(STRIPE_DELAYED, &sh->state);
+
/* now if nothing is locked, and if we have enough data,
* we can start a write request
*/
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 5e40a8b68cbe..b3b922adc0e4 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1415,7 +1415,6 @@ static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
cfhsi = netdev_priv(dev);
cfhsi_netlink_parms(data, cfhsi);
- dev_net_set(cfhsi->ndev, src_net);
get_ops = symbol_get(cfhsi_get_ops);
if (!get_ops) {
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 5d3b5202327c..c638c85f3954 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -45,7 +45,7 @@ config AMD8111_ETH
config LANCE
tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
- depends on ISA && ISA_DMA_API
+ depends on ISA && ISA_DMA_API && !ARM
---help---
If you have a network (Ethernet) card of this type, say Y and read
the Ethernet-HOWTO, available from
@@ -142,7 +142,7 @@ config PCMCIA_NMCLAN
config NI65
tristate "NI6510 support"
- depends on ISA && ISA_DMA_API
+ depends on ISA && ISA_DMA_API && !ARM
---help---
If you have a network (Ethernet) card of this type, say Y and read
the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 5b22764ba88d..27245efe9f50 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -952,6 +952,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
do {
/* WARNING: MACE_IR is a READ/CLEAR port! */
status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
+ if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS)
+ return IRQ_NONE;
pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index c036a0e61bba..d41f9f468688 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -522,6 +522,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+ hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
HASHTBLSZ);
@@ -551,13 +552,14 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
break;
}
- /* The Queue and Channel counts are zero based so increment them
+ /* The Queue, Channel and TC counts are zero based so increment them
* to get the actual number
*/
hw_feat->rx_q_cnt++;
hw_feat->tx_q_cnt++;
hw_feat->rx_ch_cnt++;
hw_feat->tx_ch_cnt++;
+ hw_feat->tc_cnt++;
DBGPR("<--xgbe_get_all_hw_features\n");
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 02add385a33d..44b15373d6b3 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -373,6 +373,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
break;
+ /* read fpqnum field after dataaddr field */
+ dma_rmb();
if (is_rx_desc(raw_desc))
ret = xgene_enet_rx_frame(ring, raw_desc);
else
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 7403dff8f14a..905ac5f5d9a6 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -32,7 +32,8 @@ config CS89x0
will be called cs89x0.
config CS89x0_PLATFORM
- bool "CS89x0 platform driver support"
+ bool "CS89x0 platform driver support" if HAS_IOPORT_MAP
+ default !HAS_IOPORT_MAP
depends on CS89x0
help
Say Y to compile the cs89x0 driver as a platform driver. This
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 3e1a9c1a67a9..fda12fb32ec7 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1586,7 +1586,7 @@ static int gfar_write_filer_table(struct gfar_private *priv,
return -EBUSY;
/* Fill regular entries */
- for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
+ for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
i++)
gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
/* Fill the rest with fall-throughs */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index ad2b4897b392..ebf9d4a42fdd 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1907,7 +1907,8 @@ static void igbvf_watchdog_task(struct work_struct *work)
static int igbvf_tso(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
+ __be16 protocol)
{
struct e1000_adv_tx_context_desc *context_desc;
struct igbvf_buffer *buffer_info;
@@ -1927,7 +1928,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
l4len = tcp_hdrlen(skb);
*hdr_len += l4len;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -1958,7 +1959,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
@@ -1984,7 +1985,8 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+ struct sk_buff *skb, u32 tx_flags,
+ __be16 protocol)
{
struct e1000_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -2011,7 +2013,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- switch (skb->protocol) {
+ switch (protocol) {
case htons(ETH_P_IP):
tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -2211,6 +2213,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
u8 hdr_len = 0;
int count = 0;
int tso = 0;
+ __be16 protocol = vlan_get_protocol(skb);
if (test_bit(__IGBVF_DOWN, &adapter->state)) {
dev_kfree_skb_any(skb);
@@ -2240,13 +2243,13 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
IGBVF_TX_FLAGS_VLAN_SHIFT);
}
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= IGBVF_TX_FLAGS_IPV4;
first = tx_ring->next_to_use;
tso = skb_is_gso(skb) ?
- igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
+ igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
if (unlikely(tso < 0)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2254,7 +2257,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
if (tso)
tx_flags |= IGBVF_TX_FLAGS_TSO;
- else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
(skb->ip_summed == CHECKSUM_PARTIAL))
tx_flags |= IGBVF_TX_FLAGS_CSUM;
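The igbvf change above (like the ixgbe and ixgbevf hunks that follow) stops trusting skb->protocol for offload setup: on a VLAN-tagged frame that field holds ETH_P_8021Q, not the L3 protocol, so the IPv4 TSO/checksum paths were never taken. A toy sketch of what looking through the tag buys you; the struct layout is invented, and the real vlan_get_protocol() parses the packet headers rather than a pre-extracted field:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>  /* htons */

#define ETH_P_IP    0x0800
#define ETH_P_8021Q 0x8100

/* Toy skb: just the two fields this example needs (hypothetical layout). */
struct toy_skb {
        uint16_t protocol;              /* outer ethertype, network order */
        uint16_t vlan_encapsulated;     /* inner ethertype if 802.1Q tagged */
};

/* Userspace sketch of the idea behind vlan_get_protocol(): look through
 * a VLAN tag to find the real L3 protocol instead of trusting skb->protocol. */
static uint16_t toy_vlan_get_protocol(const struct toy_skb *skb)
{
        if (skb->protocol == htons(ETH_P_8021Q))
                return skb->vlan_encapsulated;
        return skb->protocol;
}

int main(void)
{
        struct toy_skb tagged = {
                .protocol          = htons(ETH_P_8021Q),
                .vlan_encapsulated = htons(ETH_P_IP),
        };

        /* Checking skb->protocol directly would miss ETH_P_IP here, so the
         * TSO/checksum offload setup would take the wrong branch. */
        printf("real protocol is IPv4: %s\n",
               toy_vlan_get_protocol(&tagged) == htons(ETH_P_IP) ? "yes" : "no");
        return 0;
}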
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e4086fea4be2..e9e3a1eb9a97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7227,11 +7227,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (!vhdr)
goto out_drop;
- protocol = vhdr->h_vlan_encapsulated_proto;
tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
}
+ protocol = vlan_get_protocol(skb);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
adapter->ptp_clock &&
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index c9b49bfb51bb..fe2e10f40df8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3099,7 +3099,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (first->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -3156,7 +3156,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4_hdr = 0;
- switch (skb->protocol) {
+ switch (first->protocol) {
case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 803f17653da7..1409d0cd6143 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -244,7 +244,8 @@ extern int mlx4_log_num_mgm_entry_size;
extern int log_mtts_per_seg;
extern int mlx4_internal_err_reset;
-#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
+#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \
+ MLX4_MFUNC_MAX))
#define ALL_SLAVES 0xff
struct mlx4_bitmap {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 4d2496f28b85..d4b5085a21fa 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -968,7 +968,12 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
budget);
work_done = qlcnic_process_rcv_ring(sds_ring, budget);
- if ((work_done < budget) && tx_complete) {
+
+ /* Check if we need a repoll */
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
qlcnic_enable_sds_intr(adapter, sds_ring);
@@ -993,6 +998,9 @@ static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
napi_complete(&tx_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
qlcnic_enable_tx_intr(adapter, tx_ring);
+ } else {
+ /* As qlcnic_process_cmd_ring() returned 0, we need a repoll */
+ work_done = budget;
}
return work_done;
@@ -1951,7 +1959,12 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
- if ((work_done < budget) && tx_complete) {
+
+ /* Check if we need a repoll */
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
napi_complete(&sds_ring->napi);
qlcnic_enable_sds_intr(adapter, sds_ring);
}
@@ -1974,7 +1987,12 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
- if ((work_done < budget) && tx_complete) {
+
+ /* Check if we need a repoll */
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
napi_complete(&sds_ring->napi);
qlcnic_enable_sds_intr(adapter, sds_ring);
}
@@ -1996,6 +2014,9 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
napi_complete(&tx_ring->napi);
if (test_bit(__QLCNIC_DEV_UP , &adapter->state))
qlcnic_enable_tx_intr(adapter, tx_ring);
+ } else {
+ /* need a repoll */
+ work_done = budget;
}
return work_done;
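All four qlcnic poll handlers above follow the same NAPI contract: a handler that still has work pending must report the full budget so the core schedules another poll, and may only call napi_complete() and re-enable interrupts when it returns less than budget. A compilable toy of that contract; the ring-processing stubs and their behaviour are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Invented stubs standing in for the ring-processing helpers. */
static bool process_tx_ring(void) { static int n; return ++n > 2; } /* done after 2 polls */
static int  process_rx_ring(int budget) { return budget / 2; }      /* rx always light */

static void napi_complete(void)     { puts("napi_complete: stop polling"); }
static void enable_interrupts(void) { puts("irqs re-enabled"); }

/* Mirrors the fixed qlcnic logic: unfinished tx work forces a repoll by
 * reporting the whole budget, even when rx finished early. */
static int poll(int budget)
{
        bool tx_complete = process_tx_ring();
        int work_done = process_rx_ring(budget);

        if (!tx_complete)
                work_done = budget;     /* ask the NAPI core to poll again */

        if (work_done < budget) {
                napi_complete();
                enable_interrupts();
        }
        return work_done;
}

int main(void)
{
        int budget = 64, done;

        do {
                done = poll(budget);
                printf("poll reported %d/%d\n", done, budget);
        } while (done == budget);       /* the core repolls on a full budget */
        return 0;
}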
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index dc0058f90370..8011ef3e7707 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2351,23 +2351,29 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status = 0;
+ bool need_restart = netif_running(ndev);
- status = ql_adapter_down(qdev);
- if (status) {
- netif_err(qdev, link, qdev->ndev,
- "Failed to bring down the adapter\n");
- return status;
+ if (need_restart) {
+ status = ql_adapter_down(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring down the adapter\n");
+ return status;
+ }
}
/* update the features with the recent change */
ndev->features = features;
- status = ql_adapter_up(qdev);
- if (status) {
- netif_err(qdev, link, qdev->ndev,
- "Failed to bring up the adapter\n");
- return status;
+ if (need_restart) {
+ status = ql_adapter_up(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring up the adapter\n");
+ return status;
+ }
}
+
return status;
}
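The qlge fix above only bounces the adapter around a feature update when the interface was actually running, instead of failing on a down interface. A minimal sketch of that need_restart pattern; all names and the feature value are hypothetical:

#include <stdbool.h>
#include <stdio.h>

static bool running = true;     /* pretend the interface is up */

static int adapter_down(void) { puts("adapter down"); return 0; }
static int adapter_up(void)   { puts("adapter up");   return 0; }

/* Mirrors the qlge logic: record whether the device was running first,
 * and only perform the down/up cycle around the update if it was. */
static int update_features(unsigned features)
{
        bool need_restart = running;
        int status = 0;

        if (need_restart && (status = adapter_down()) != 0)
                return status;
        printf("features = 0x%x\n", features);
        if (need_restart)
                status = adapter_up();
        return status;
}

int main(void)
{
        return update_features(0x3);
}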
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 2b719ccd6e7c..2b10b85d8a08 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1123,6 +1123,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
}
+ nskb->queue_mapping = skb->queue_mapping;
dev_kfree_skb(skb);
skb = nskb;
}
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d2af032ff225..58bb4102afac 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -717,7 +717,7 @@ int netvsc_send(struct hv_device *device,
u64 req_id;
unsigned int section_index = NETVSC_INVALID_INDEX;
u32 msg_size = 0;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
u16 q_idx = packet->q_idx;
@@ -744,8 +744,6 @@ int netvsc_send(struct hv_device *device,
packet);
skb = (struct sk_buff *)
(unsigned long)packet->send_completion_tid;
- if (skb)
- dev_kfree_skb_any(skb);
packet->page_buf_cnt = 0;
}
}
@@ -811,6 +809,13 @@ int netvsc_send(struct hv_device *device,
packet, ret);
}
+ if (ret != 0) {
+ if (section_index != NETVSC_INVALID_INDEX)
+ netvsc_free_send_slot(net_device, section_index);
+ } else if (skb) {
+ dev_kfree_skb_any(skb);
+ }
+
return ret;
}
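The netvsc change above moves the kfree of the completion skb out of the slot-reclaim path and into the success path: on failure the send slot is returned and the skb stays alive for the retry. A small sketch of that "free only on success" ownership rule; the packet structure and transport stub are invented:

#include <stdio.h>
#include <stdlib.h>

struct packet { char *data; };

/* Hypothetical transport: fails on the first attempt, succeeds afterwards. */
static int do_send(struct packet *pkt)
{
        static int calls;
        (void)pkt;
        return ++calls == 1 ? -1 : 0;
}

static int send_packet(struct packet *pkt)
{
        int ret = do_send(pkt);

        if (ret != 0) {
                /* Failure: keep pkt->data alive; the caller will retry. */
                printf("send failed, keeping buffer for retry\n");
        } else {
                /* Success: the buffer is consumed, now it is safe to free. */
                free(pkt->data);
                pkt->data = NULL;
                printf("send ok, buffer released\n");
        }
        return ret;
}

int main(void)
{
        struct packet pkt = { .data = malloc(64) };

        while (send_packet(&pkt) != 0)
                ;       /* retry with the same, still-valid buffer */
        return 0;
}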
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index d0ed5694dd7d..e40fdfccc9c1 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -17,7 +17,6 @@
#include <linux/fs.h>
#include <linux/uio.h>
-#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
@@ -81,7 +80,7 @@ static struct cdev macvtap_cdev;
static const struct proto_ops macvtap_socket_ops;
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
- NETIF_F_TSO6)
+ NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
@@ -586,11 +585,7 @@ static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
gso_type = SKB_GSO_TCPV6;
break;
case VIRTIO_NET_HDR_GSO_UDP:
- pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
- current->comm);
gso_type = SKB_GSO_UDP;
- if (skb->protocol == htons(ETH_P_IPV6))
- ipv6_proxy_select_ident(skb);
break;
default:
return -EINVAL;
@@ -636,6 +631,8 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (sinfo->gso_type & SKB_GSO_TCPV6)
vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ else if (sinfo->gso_type & SKB_GSO_UDP)
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
else
BUG();
if (sinfo->gso_type & SKB_GSO_TCP_ECN)
@@ -965,6 +962,9 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
if (arg & TUN_F_TSO6)
feature_mask |= NETIF_F_TSO6;
}
+
+ if (arg & TUN_F_UFO)
+ feature_mask |= NETIF_F_UFO;
}
/* tun/tap driver inverts the usage for TSO offloads, where
@@ -975,7 +975,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
* When user space turns off TSO, we turn off GSO/LRO so that
* user-space will not receive TSO frames.
*/
- if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
+ if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
features |= RX_OFFLOADS;
else
features &= ~RX_OFFLOADS;
@@ -1090,7 +1090,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
- TUN_F_TSO_ECN))
+ TUN_F_TSO_ECN | TUN_F_UFO))
return -EINVAL;
rtnl_lock();
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index 602c625d95d5..b5edc7f96a39 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
/*
* See if we managed to reduce the size of the packet.
*/
- if (olen < isize) {
+ if (olen < isize && olen <= osize) {
state->stats.comp_bytes += olen;
state->stats.comp_packets++;
} else {
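The ppp_deflate fix above tightens the "did compression help" test: a packet only counts as compressed when the deflated form is both smaller than the input and actually fit the output buffer, otherwise the original must be sent. A toy illustration; toy_compress() is invented, with snprintf-like semantics (it returns the full deflated size even when the output buffer was too small):

#include <stdio.h>
#include <string.h>

/* Toy "compressor": returns the full output size even on truncation. */
static int toy_compress(const char *in, size_t isize, char *out, size_t osize)
{
        size_t olen = isize - 4;        /* pretend we saved four bytes */

        memcpy(out, in, olen <= osize ? olen : osize);  /* may truncate */
        return (int)olen;
}

int main(void)
{
        char in[16] = "fifteen bytes!!";
        char out[8];
        size_t isize = sizeof(in), osize = sizeof(out);
        int olen = toy_compress(in, isize, out, osize);

        /* olen < isize alone would wrongly accept the truncated output;
         * the added olen <= osize check catches exactly that case. */
        if (olen < (int)isize && olen <= (int)osize)
                printf("send compressed (%d bytes)\n", olen);
        else
                printf("send original (%zu bytes)\n", isize);
        return 0;
}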
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3ff8cd7bf74d..ad7d3d5f3ee5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -65,7 +65,6 @@
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
-#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
@@ -186,7 +185,7 @@ struct tun_struct {
struct net_device *dev;
netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
- NETIF_F_TSO6)
+ NETIF_F_TSO6|NETIF_F_UFO)
int vnet_hdr_sz;
int sndbuf;
@@ -1166,8 +1165,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
break;
}
- skb_reset_network_header(skb);
-
if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
pr_debug("GSO!\n");
switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -1178,20 +1175,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
break;
case VIRTIO_NET_HDR_GSO_UDP:
- {
- static bool warned;
-
- if (!warned) {
- warned = true;
- netdev_warn(tun->dev,
- "%s: using disabled UFO feature; please fix this program\n",
- current->comm);
- }
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
- if (skb->protocol == htons(ETH_P_IPV6))
- ipv6_proxy_select_ident(skb);
break;
- }
default:
tun->dev->stats.rx_frame_errors++;
kfree_skb(skb);
@@ -1220,6 +1205,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
+ skb_reset_network_header(skb);
skb_probe_transport_header(skb, 0);
rxhash = skb_get_hash(skb);
@@ -1297,6 +1283,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (sinfo->gso_type & SKB_GSO_TCPV6)
gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ else if (sinfo->gso_type & SKB_GSO_UDP)
+ gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
else {
pr_err("unexpected GSO type: "
"0x%x, gso_size %d, hdr_len %d\n",
@@ -1752,6 +1740,11 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
features |= NETIF_F_TSO6;
arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
}
+
+ if (arg & TUN_F_UFO) {
+ features |= NETIF_F_UFO;
+ arg &= ~TUN_F_UFO;
+ }
}
/* This gives the user a way to test for new features in future by
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 99b69af14274..4a1e9c489f1f 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -77,7 +77,7 @@ static int wait_phy_eeprom_ready(struct usbnet *dev, int phy)
int ret;
udelay(1);
- ret = sr_read_reg(dev, EPCR, &tmp);
+ ret = sr_read_reg(dev, SR_EPCR, &tmp);
if (ret < 0)
return ret;
@@ -98,15 +98,15 @@ static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg,
mutex_lock(&dev->phy_mutex);
- sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
- sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
+ sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
+ sr_write_reg(dev, SR_EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
ret = wait_phy_eeprom_ready(dev, phy);
if (ret < 0)
goto out_unlock;
- sr_write_reg(dev, EPCR, 0x0);
- ret = sr_read(dev, EPDR, 2, value);
+ sr_write_reg(dev, SR_EPCR, 0x0);
+ ret = sr_read(dev, SR_EPDR, 2, value);
netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
phy, reg, *value, ret);
@@ -123,19 +123,19 @@ static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg,
mutex_lock(&dev->phy_mutex);
- ret = sr_write(dev, EPDR, 2, &value);
+ ret = sr_write(dev, SR_EPDR, 2, &value);
if (ret < 0)
goto out_unlock;
- sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
- sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
+ sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
+ sr_write_reg(dev, SR_EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
(EPCR_WEP | EPCR_ERPRW));
ret = wait_phy_eeprom_ready(dev, phy);
if (ret < 0)
goto out_unlock;
- sr_write_reg(dev, EPCR, 0x0);
+ sr_write_reg(dev, SR_EPCR, 0x0);
out_unlock:
mutex_unlock(&dev->phy_mutex);
@@ -188,7 +188,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
if (loc == MII_BMSR) {
u8 value;
- sr_read_reg(dev, NSR, &value);
+ sr_read_reg(dev, SR_NSR, &value);
if (value & NSR_LINKST)
rc = 1;
}
@@ -228,7 +228,7 @@ static u32 sr9700_get_link(struct net_device *netdev)
int rc = 0;
/* Get the Link Status directly */
- sr_read_reg(dev, NSR, &value);
+ sr_read_reg(dev, SR_NSR, &value);
if (value & NSR_LINKST)
rc = 1;
@@ -281,8 +281,8 @@ static void sr9700_set_multicast(struct net_device *netdev)
}
}
- sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes);
- sr_write_reg_async(dev, RCR, rx_ctl);
+ sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes);
+ sr_write_reg_async(dev, SR_RCR, rx_ctl);
}
static int sr9700_set_mac_address(struct net_device *netdev, void *p)
@@ -297,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p)
}
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- sr_write_async(dev, PAR, 6, netdev->dev_addr);
+ sr_write_async(dev, SR_PAR, 6, netdev->dev_addr);
return 0;
}
@@ -340,7 +340,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
mii->phy_id_mask = 0x1f;
mii->reg_num_mask = 0x1f;
- sr_write_reg(dev, NCR, NCR_RST);
+ sr_write_reg(dev, SR_NCR, NCR_RST);
udelay(20);
/* read MAC
@@ -348,17 +348,17 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
* EEPROM automatically to PAR. In case there is no EEPROM externally,
* a default MAC address is stored in PAR so the chip works properly.
*/
- if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) {
+ if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) {
netdev_err(netdev, "Error reading MAC address\n");
ret = -ENODEV;
goto out;
}
/* power up and reset phy */
- sr_write_reg(dev, PRR, PRR_PHY_RST);
+ sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
/* at least 10 ms; use 20 ms to be safe */
mdelay(20);
- sr_write_reg(dev, PRR, 0);
+ sr_write_reg(dev, SR_PRR, 0);
/* at least 1 ms; use 2 ms so the registers read back correctly */
udelay(2 * 1000);
diff --git a/drivers/net/usb/sr9700.h b/drivers/net/usb/sr9700.h
index fd687c575e74..258b030277e7 100644
--- a/drivers/net/usb/sr9700.h
+++ b/drivers/net/usb/sr9700.h
@@ -14,13 +14,13 @@
/* sr9700 spec. register table on Linux platform */
/* Network Control Reg */
-#define NCR 0x00
+#define SR_NCR 0x00
#define NCR_RST (1 << 0)
#define NCR_LBK (3 << 1)
#define NCR_FDX (1 << 3)
#define NCR_WAKEEN (1 << 6)
/* Network Status Reg */
-#define NSR 0x01
+#define SR_NSR 0x01
#define NSR_RXRDY (1 << 0)
#define NSR_RXOV (1 << 1)
#define NSR_TX1END (1 << 2)
@@ -30,7 +30,7 @@
#define NSR_LINKST (1 << 6)
#define NSR_SPEED (1 << 7)
/* Tx Control Reg */
-#define TCR 0x02
+#define SR_TCR 0x02
#define TCR_CRC_DIS (1 << 1)
#define TCR_PAD_DIS (1 << 2)
#define TCR_LC_CARE (1 << 3)
@@ -38,7 +38,7 @@
#define TCR_EXCECM (1 << 5)
#define TCR_LF_EN (1 << 6)
/* Tx Status Reg for Packet Index 1 */
-#define TSR1 0x03
+#define SR_TSR1 0x03
#define TSR1_EC (1 << 2)
#define TSR1_COL (1 << 3)
#define TSR1_LC (1 << 4)
@@ -46,7 +46,7 @@
#define TSR1_LOC (1 << 6)
#define TSR1_TLF (1 << 7)
/* Tx Status Reg for Packet Index 2 */
-#define TSR2 0x04
+#define SR_TSR2 0x04
#define TSR2_EC (1 << 2)
#define TSR2_COL (1 << 3)
#define TSR2_LC (1 << 4)
@@ -54,7 +54,7 @@
#define TSR2_LOC (1 << 6)
#define TSR2_TLF (1 << 7)
/* Rx Control Reg*/
-#define RCR 0x05
+#define SR_RCR 0x05
#define RCR_RXEN (1 << 0)
#define RCR_PRMSC (1 << 1)
#define RCR_RUNT (1 << 2)
@@ -62,87 +62,87 @@
#define RCR_DIS_CRC (1 << 4)
#define RCR_DIS_LONG (1 << 5)
/* Rx Status Reg */
-#define RSR 0x06
+#define SR_RSR 0x06
#define RSR_AE (1 << 2)
#define RSR_MF (1 << 6)
#define RSR_RF (1 << 7)
/* Rx Overflow Counter Reg */
-#define ROCR 0x07
+#define SR_ROCR 0x07
#define ROCR_ROC (0x7F << 0)
#define ROCR_RXFU (1 << 7)
/* Back Pressure Threshold Reg */
-#define BPTR 0x08
+#define SR_BPTR 0x08
#define BPTR_JPT (0x0F << 0)
#define BPTR_BPHW (0x0F << 4)
/* Flow Control Threshold Reg */
-#define FCTR 0x09
+#define SR_FCTR 0x09
#define FCTR_LWOT (0x0F << 0)
#define FCTR_HWOT (0x0F << 4)
/* rx/tx Flow Control Reg */
-#define FCR 0x0A
+#define SR_FCR 0x0A
#define FCR_FLCE (1 << 0)
#define FCR_BKPA (1 << 4)
#define FCR_TXPEN (1 << 5)
#define FCR_TXPF (1 << 6)
#define FCR_TXP0 (1 << 7)
/* Eeprom & Phy Control Reg */
-#define EPCR 0x0B
+#define SR_EPCR 0x0B
#define EPCR_ERRE (1 << 0)
#define EPCR_ERPRW (1 << 1)
#define EPCR_ERPRR (1 << 2)
#define EPCR_EPOS (1 << 3)
#define EPCR_WEP (1 << 4)
/* Eeprom & Phy Address Reg */
-#define EPAR 0x0C
+#define SR_EPAR 0x0C
#define EPAR_EROA (0x3F << 0)
#define EPAR_PHY_ADR_MASK (0x03 << 6)
#define EPAR_PHY_ADR (0x01 << 6)
/* Eeprom & Phy Data Reg */
-#define EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */
+#define SR_EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */
/* Wakeup Control Reg */
-#define WCR 0x0F
+#define SR_WCR 0x0F
#define WCR_MAGICST (1 << 0)
#define WCR_LINKST (1 << 2)
#define WCR_MAGICEN (1 << 3)
#define WCR_LINKEN (1 << 5)
/* Physical Address Reg */
-#define PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */
+#define SR_PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */
/* Multicast Address Reg */
-#define MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */
+#define SR_MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */
/* 0x1e unused */
/* Phy Reset Reg */
-#define PRR 0x1F
+#define SR_PRR 0x1F
#define PRR_PHY_RST (1 << 0)
/* Tx sdram Write Pointer Address Low */
-#define TWPAL 0x20
+#define SR_TWPAL 0x20
/* Tx sdram Write Pointer Address High */
-#define TWPAH 0x21
+#define SR_TWPAH 0x21
/* Tx sdram Read Pointer Address Low */
-#define TRPAL 0x22
+#define SR_TRPAL 0x22
/* Tx sdram Read Pointer Address High */
-#define TRPAH 0x23
+#define SR_TRPAH 0x23
/* Rx sdram Write Pointer Address Low */
-#define RWPAL 0x24
+#define SR_RWPAL 0x24
/* Rx sdram Write Pointer Address High */
-#define RWPAH 0x25
+#define SR_RWPAH 0x25
/* Rx sdram Read Pointer Address Low */
-#define RRPAL 0x26
+#define SR_RRPAL 0x26
/* Rx sdram Read Pointer Address High */
-#define RRPAH 0x27
+#define SR_RRPAH 0x27
/* Vendor ID register */
-#define VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */
+#define SR_VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */
/* Product ID register */
-#define PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */
+#define SR_PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */
/* CHIP Revision register */
-#define CHIPR 0x2C
+#define SR_CHIPR 0x2C
/* 0x2D --> 0xEF unused */
/* USB Device Address */
-#define USBDA 0xF0
+#define SR_USBDA 0xF0
#define USBDA_USBFA (0x7F << 0)
/* RX packet Counter Reg */
-#define RXC 0xF1
+#define SR_RXC 0xF1
/* Tx packet Counter & USB Status Reg */
-#define TXC_USBS 0xF2
+#define SR_TXC_USBS 0xF2
#define TXC_USBS_TXC0 (1 << 0)
#define TXC_USBS_TXC1 (1 << 1)
#define TXC_USBS_TXC2 (1 << 2)
@@ -150,7 +150,7 @@
#define TXC_USBS_SUSFLAG (1 << 6)
#define TXC_USBS_RXFAULT (1 << 7)
/* USB Control register */
-#define USBC 0xF4
+#define SR_USBC 0xF4
#define USBC_EP3NAK (1 << 4)
#define USBC_EP3ACK (1 << 5)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9bd71d53c5e0..110a2cf67244 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -490,17 +490,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
break;
case VIRTIO_NET_HDR_GSO_UDP:
- {
- static bool warned;
-
- if (!warned) {
- warned = true;
- netdev_warn(dev,
- "host using disabled UFO feature; please fix it\n");
- }
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
break;
- }
case VIRTIO_NET_HDR_GSO_TCPV6:
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
break;
@@ -888,6 +879,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
else
BUG();
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
@@ -1752,7 +1745,7 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
- dev->hw_features |= NETIF_F_TSO
+ dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
| NETIF_F_TSO_ECN | NETIF_F_TSO6;
}
/* Individual feature bits: what can host handle? */
@@ -1762,11 +1755,13 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->hw_features |= NETIF_F_TSO6;
if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
dev->hw_features |= NETIF_F_TSO_ECN;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+ dev->hw_features |= NETIF_F_UFO;
dev->features |= NETIF_F_GSO_ROBUST;
if (gso)
- dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+ dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
/* (!csum && gso) case will be fixed by register_netdev() */
}
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
@@ -1804,7 +1799,8 @@ static int virtnet_probe(struct virtio_device *vdev)
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
vi->big_packets = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
@@ -2000,9 +1996,9 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
- VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
+ VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
- VIRTIO_NET_F_GUEST_ECN,
+ VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
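The virtio_net hunks above re-enable UFO by following the driver's usual negotiation pattern: an offload is only advertised to the stack when the device negotiated the matching VIRTIO_NET_F_* feature bit. A trivial sketch of that mapping, with invented bit values standing in for the VIRTIO_NET_F_*/NETIF_F_* constants:

#include <stdbool.h>
#include <stdio.h>

/* Invented feature bits standing in for VIRTIO_NET_F_* / NETIF_F_*. */
#define HOST_TSO4  (1u << 0)
#define HOST_UFO   (1u << 1)

#define NETIF_TSO  (1u << 0)
#define NETIF_UFO  (1u << 1)

static bool has_feature(unsigned dev_features, unsigned bit)
{
        return (dev_features & bit) != 0;
}

int main(void)
{
        unsigned dev_features = HOST_TSO4 | HOST_UFO;   /* what the host offers */
        unsigned hw_features = 0;                       /* what we advertise */

        /* Mirrors the virtio_net pattern: only advertise an offload when
         * the device actually negotiated the matching feature bit. */
        if (has_feature(dev_features, HOST_TSO4))
                hw_features |= NETIF_TSO;
        if (has_feature(dev_features, HOST_UFO))
                hw_features |= NETIF_UFO;

        printf("hw_features = 0x%x\n", hw_features);
        return 0;
}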
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index d08072c10aa9..e6ed3e66964d 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2625,10 +2625,10 @@ static void vxlan_sock_work(struct work_struct *work)
dev_put(vxlan->dev);
}
-static int vxlan_newlink(struct net *net, struct net_device *dev,
+static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
__u32 vni;
@@ -2638,7 +2638,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (!data[IFLA_VXLAN_ID])
return -EINVAL;
- vxlan->net = dev_net(dev);
+ vxlan->net = src_net;
vni = nla_get_u32(data[IFLA_VXLAN_ID]);
dst->remote_vni = vni;
@@ -2674,7 +2674,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (data[IFLA_VXLAN_LINK] &&
(dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
struct net_device *lowerdev
- = __dev_get_by_index(net, dst->remote_ifindex);
+ = __dev_get_by_index(src_net, dst->remote_ifindex);
if (!lowerdev) {
pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2761,7 +2761,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (data[IFLA_VXLAN_GBP])
vxlan->flags |= VXLAN_F_GBP;
- if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
+ if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
vxlan->dst_port, vxlan->flags)) {
pr_info("duplicate VNI %u\n", vni);
return -EEXIST;
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 94e234975c61..a2fdd15f285a 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -25,7 +25,7 @@ if WAN
# There is no way to detect a comtrol sv11 - force it modular for now.
config HOSTESS_SV11
tristate "Comtrol Hostess SV-11 support"
- depends on ISA && m && ISA_DMA_API && INET && HDLC
+ depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
help
Driver for Comtrol Hostess SV-11 network card which
operates on low speed synchronous serial links at up to
@@ -37,7 +37,7 @@ config HOSTESS_SV11
# The COSA/SRP driver has not been tested as non-modular yet.
config COSA
tristate "COSA/SRP sync serial boards support"
- depends on ISA && m && ISA_DMA_API && HDLC
+ depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS
---help---
Driver for COSA and SRP synchronous serial boards.
@@ -87,7 +87,7 @@ config LANMEDIA
# There is no way to detect a Sealevel board. Force it modular
config SEALEVEL_4021
tristate "Sealevel Systems 4021 support"
- depends on ISA && m && ISA_DMA_API && INET && HDLC
+ depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
help
This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 9259a732e8a4..037f74f0fcf6 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -578,6 +578,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
goto err_rx_unbind;
}
queue->task = task;
+ get_task_struct(task);
task = kthread_create(xenvif_dealloc_kthread,
(void *)queue, "%s-dealloc", queue->name);
@@ -634,6 +635,7 @@ void xenvif_disconnect(struct xenvif *vif)
if (queue->task) {
kthread_stop(queue->task);
+ put_task_struct(queue->task);
queue->task = NULL;
}
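The xen-netback change above takes a reference on the kthread's task_struct with get_task_struct() at connect time and drops it with put_task_struct() after kthread_stop(), so the struct cannot be freed underneath the stop path if the thread exits first. A userspace analogue of that lifetime rule using pthreads and an explicit refcount; the struct layout and helpers are invented, and this is an analogue, not the kernel API (build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Refcounted stand-in for struct task_struct (layout invented). */
struct task {
        pthread_t tid;
        int refcount;
        pthread_mutex_t lock;
};

static void get_task(struct task *t)
{
        pthread_mutex_lock(&t->lock);
        t->refcount++;
        pthread_mutex_unlock(&t->lock);
}

static void put_task(struct task *t)
{
        int last;

        pthread_mutex_lock(&t->lock);
        last = (--t->refcount == 0);
        pthread_mutex_unlock(&t->lock);
        if (last)
                free(t);        /* only the final reference frees */
}

static void *worker(void *arg)
{
        struct task *t = arg;

        puts("worker running");
        put_task(t);            /* worker drops its own reference on exit */
        return NULL;
}

int main(void)
{
        struct task *t = calloc(1, sizeof(*t));

        pthread_mutex_init(&t->lock, NULL);
        t->refcount = 1;        /* the worker's reference */
        get_task(t);            /* our reference, like get_task_struct() */
        pthread_create(&t->tid, NULL, worker, t);

        pthread_join(t->tid, NULL);     /* analogue of kthread_stop() */
        put_task(t);            /* safe: our reference kept *t alive */
        return 0;
}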
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 49322b6c32df..13899d5099e5 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -2008,8 +2008,7 @@ int xenvif_kthread_guest_rx(void *data)
*/
if (unlikely(vif->disabled && queue->id == 0)) {
xenvif_carrier_off(vif);
- xenvif_rx_queue_purge(queue);
- continue;
+ break;
}
if (!skb_queue_empty(&queue->rx_queue))
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index df781cdf13c1..17ca98657a28 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -283,6 +283,9 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
struct msi_msg msg;
struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
+ if (desc->msi_attrib.is_msix)
+ return -EINVAL;
+
irq = assign_irq(1, desc, &pos);
if (irq < 0)
return irq;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e52356aa09b8..903d5078b5ed 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -324,18 +324,52 @@ static void quirk_s3_64M(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
+static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
+ const char *name)
+{
+ u32 region;
+ struct pci_bus_region bus_region;
+ struct resource *res = dev->resource + pos;
+
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
+
+ if (!region)
+ return;
+
+ res->name = pci_name(dev);
+ res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
+ res->flags |=
+ (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
+ region &= ~(size - 1);
+
+ /* Convert from PCI bus to resource space */
+ bus_region.start = region;
+ bus_region.end = region + size - 1;
+ pcibios_bus_to_resource(dev->bus, res, &bus_region);
+
+ dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
+ name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
+}
+
/*
* Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
* ver. 1.33 20070103) don't set the correct ISA PCI region header info.
* BAR0 should be 8 bytes; instead, it may be set to something like 8k
* (which conflicts w/ BAR1's memory range).
+ *
+ * CS553x's ISA PCI BARs may also be read-only (ref:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward).
*/
static void quirk_cs5536_vsa(struct pci_dev *dev)
{
+ static char *name = "CS5536 ISA bridge";
+
if (pci_resource_len(dev, 0) != 8) {
- struct resource *res = &dev->resource[0];
- res->end = res->start + 8 - 1;
- dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n");
+ quirk_io(dev, 0, 8, name); /* SMB */
+ quirk_io(dev, 1, 256, name); /* GPIO */
+ quirk_io(dev, 2, 64, name); /* MFGPT */
+ dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n",
+ name);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index dfd021e8268f..f4cd0b9b2438 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -177,7 +177,7 @@ struct at91_pinctrl {
struct device *dev;
struct pinctrl_dev *pctl;
- int nbanks;
+ int nactive_banks;
uint32_t *mux_mask;
int nmux;
@@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name,
int mux;
/* check if it's a valid config */
- if (pin->bank >= info->nbanks) {
+ if (pin->bank >= gpio_banks) {
dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n",
- name, index, pin->bank, info->nbanks);
+ name, index, pin->bank, gpio_banks);
return -EINVAL;
}
+ if (!gpio_chips[pin->bank]) {
+ dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n",
+ name, index, pin->bank);
+ return -ENXIO;
+ }
+
if (pin->pin >= MAX_NB_GPIO_PER_BANK) {
dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",
name, index, pin->pin, MAX_NB_GPIO_PER_BANK);
@@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info,
for_each_child_of_node(np, child) {
if (of_device_is_compatible(child, gpio_compat)) {
- info->nbanks++;
+ if (of_device_is_available(child))
+ info->nactive_banks++;
} else {
info->nfunctions++;
info->ngroups += of_get_child_count(child);
@@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info,
}
size /= sizeof(*list);
- if (!size || size % info->nbanks) {
- dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks);
+ if (!size || size % gpio_banks) {
+ dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks);
return -EINVAL;
}
- info->nmux = size / info->nbanks;
+ info->nmux = size / gpio_banks;
info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);
if (!info->mux_mask) {
@@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;
at91_pinctrl_child_count(info, np);
- if (info->nbanks < 1) {
+ if (gpio_banks < 1) {
dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
return -EINVAL;
}
@@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
dev_dbg(&pdev->dev, "mux-mask\n");
tmp = info->mux_mask;
- for (i = 0; i < info->nbanks; i++) {
+ for (i = 0; i < gpio_banks; i++) {
for (j = 0; j < info->nmux; j++, tmp++) {
dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);
}
@@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
if (!info->groups)
return -ENOMEM;
- dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks);
+ dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks);
dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
@@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
{
struct at91_pinctrl *info;
struct pinctrl_pin_desc *pdesc;
- int ret, i, j, k;
+ int ret, i, j, k, ngpio_chips_enabled = 0;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
* to obtain references to the struct gpio_chip * for them, and we
* need this to proceed.
*/
- for (i = 0; i < info->nbanks; i++) {
- if (!gpio_chips[i]) {
- dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
- devm_kfree(&pdev->dev, info);
- return -EPROBE_DEFER;
- }
+ for (i = 0; i < gpio_banks; i++)
+ if (gpio_chips[i])
+ ngpio_chips_enabled++;
+
+ if (ngpio_chips_enabled < info->nactive_banks) {
+ dev_warn(&pdev->dev,
+ "All GPIO chips are not registered yet (%d/%d)\n",
+ ngpio_chips_enabled, info->nactive_banks);
+ devm_kfree(&pdev->dev, info);
+ return -EPROBE_DEFER;
}
at91_pinctrl_desc.name = dev_name(&pdev->dev);
- at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK;
+ at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;
at91_pinctrl_desc.pins = pdesc =
devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL);
if (!at91_pinctrl_desc.pins)
return -ENOMEM;
- for (i = 0 , k = 0; i < info->nbanks; i++) {
+ for (i = 0, k = 0; i < gpio_banks; i++) {
for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
pdesc->number = k;
pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j);
@@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
}
/* We will handle a range of GPIO pins */
- for (i = 0; i < info->nbanks; i++)
- pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
+ for (i = 0; i < gpio_banks; i++)
+ if (gpio_chips[i])
+ pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
@@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
static int at91_gpio_of_irq_setup(struct platform_device *pdev,
struct at91_gpio_chip *at91_gpio)
{
+ struct gpio_chip *gpiochip_prev = NULL;
struct at91_gpio_chip *prev = NULL;
struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq);
- int ret;
+ int ret, i;
at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
@@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
return ret;
}
- /* Setup chained handler */
- if (at91_gpio->pioc_idx)
- prev = gpio_chips[at91_gpio->pioc_idx - 1];
-
/* The top level handler handles one bank of GPIOs, except
* on some SoC it can handle up to three...
* We only set up the handler for the first of the list.
*/
- if (prev && prev->next == at91_gpio)
+ gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
+ if (!gpiochip_prev) {
+ /* Then register the chain on the parent IRQ */
+ gpiochip_set_chained_irqchip(&at91_gpio->chip,
+ &gpio_irqchip,
+ at91_gpio->pioc_virq,
+ gpio_irq_handler);
return 0;
+ }
- /* Then register the chain on the parent IRQ */
- gpiochip_set_chained_irqchip(&at91_gpio->chip,
- &gpio_irqchip,
- at91_gpio->pioc_virq,
- gpio_irq_handler);
+ prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip);
- return 0;
+ /* we can only have 2 banks before */
+ for (i = 0; i < 2; i++) {
+ if (prev->next) {
+ prev = prev->next;
+ } else {
+ prev->next = at91_gpio;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
}
/* This structure is replicated for each GPIO block allocated at probe time */
@@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = {
.ngpio = MAX_NB_GPIO_PER_BANK,
};
-static void at91_gpio_probe_fixup(void)
-{
- unsigned i;
- struct at91_gpio_chip *at91_gpio, *last = NULL;
-
- for (i = 0; i < gpio_banks; i++) {
- at91_gpio = gpio_chips[i];
-
- /*
- * GPIO controller are grouped on some SoC:
- * PIOC, PIOD and PIOE can share the same IRQ line
- */
- if (last && last->pioc_virq == at91_gpio->pioc_virq)
- last->next = at91_gpio;
- last = at91_gpio;
- }
-}
-
static struct of_device_id at91_gpio_of_match[] = {
{ .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
{ .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
@@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
gpio_chips[alias_idx] = at91_chip;
gpio_banks = max(gpio_banks, alias_idx + 1);
- at91_gpio_probe_fixup();
-
ret = at91_gpio_of_irq_setup(pdev, at91_chip);
if (ret)
goto irq_setup_err;
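The at91 hunks above drop the probe-time fixup pass and instead link banks that share a parent IRQ at irq-setup time: the first bank seen on a given pioc_virq installs the chained handler, and every later bank appends itself to that first bank's ->next list (PIOC, PIOD and PIOE can share one line, so at most two followers are accepted). A minimal sketch of that pattern follows; the struct and the install_chained_handler() helper are illustrative stand-ins, not the driver's real API.

struct bank {
	struct bank *next;	/* next bank sharing the parent IRQ */
	unsigned int virq;	/* parent interrupt line */
};

/* hypothetical: hooks the chained handler onto the parent IRQ */
static int install_chained_handler(unsigned int virq, struct bank *b);

static int chain_bank(struct bank *new, struct bank *first)
{
	int i;

	if (!first)	/* first bank on this line owns the handler */
		return install_chained_handler(new->virq, new);

	/* at most two more banks may share the line (PIOC/PIOD/PIOE) */
	for (i = 0; i < 2; i++) {
		if (first->next) {
			first = first->next;
		} else {
			first->next = new;
			return 0;
		}
	}
	return -EINVAL;
}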
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 1dba62c5cf6a..1efebc9eedfb 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -136,11 +136,12 @@ static void __detach_handler (struct kref *kref)
struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh;
struct scsi_device *sdev = scsi_dh_data->sdev;
+ scsi_dh->detach(sdev);
+
spin_lock_irq(sdev->request_queue->queue_lock);
sdev->scsi_dh_data = NULL;
spin_unlock_irq(sdev->request_queue->queue_lock);
- scsi_dh->detach(sdev);
sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name);
module_put(scsi_dh->module);
}
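The scsi_dh hunk is purely an ordering fix: ->detach() now runs while sdev->scsi_dh_data is still valid, and only afterwards is the pointer cleared under the queue lock, so the handler can reach its private data throughout teardown. Reduced to its essentials:

/* tear down while the private data is still reachable ... */
scsi_dh->detach(sdev);			/* may use sdev->scsi_dh_data */

/* ... then publish the NULL under the lock that readers take */
spin_lock_irq(sdev->request_queue->queue_lock);
sdev->scsi_dh_data = NULL;
spin_unlock_irq(sdev->request_queue->queue_lock);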
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 399516925d80..05ea0d49a3a3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2800,9 +2800,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
*/
sd_set_flush_flag(sdkp);
- max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
- sdkp->max_xfer_blocks);
+ max_xfer = sdkp->max_xfer_blocks;
max_xfer <<= ilog2(sdp->sector_size) - 9;
+
+ max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
+ max_xfer);
blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
set_capacity(disk, sdkp->capacity);
sd_config_write_same(sdkp);
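The sd.c hunk fixes a unit mismatch: sdkp->max_xfer_blocks is in logical blocks, while queue_max_hw_sectors() returns 512-byte sectors, so the device limit has to be scaled before the clamp rather than after. A worked example of the corrected order, with illustrative values for a 4 KiB-logical-block disk:

unsigned int blocks = 8192;		/* device limit, in logical blocks */
unsigned int sector_size = 4096;	/* bytes per logical block */
unsigned int max_xfer;

max_xfer = blocks << (ilog2(sector_size) - 9);	/* 8192 << 3 = 65536 sectors */
max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);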
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 4cda994d3f40..9b80d54d4ddb 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -342,8 +342,7 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (chip == NULL) {
- chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
- GFP_KERNEL);
+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
if (!chip)
return -ENOMEM;
}
@@ -382,6 +381,16 @@ static int dspi_setup(struct spi_device *spi)
return dspi_setup_transfer(spi, NULL);
}
+static void dspi_cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
+
+ dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
+ spi->master->bus_num, spi->chip_select);
+
+ kfree(chip);
+}
+
static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
@@ -467,6 +476,7 @@ static int dspi_probe(struct platform_device *pdev)
dspi->bitbang.master->setup = dspi_setup;
dspi->bitbang.master->dev.of_node = pdev->dev.of_node;
+ master->cleanup = dspi_cleanup;
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
SPI_BPW_MASK(16);
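The spi-fsl-dspi change corrects a lifetime bug: per-spi_device controller data allocated with devm_kzalloc() is tied to the controller's struct device, which outlives any individual spi_device. Plain kzalloc() in ->setup() paired with kfree() in a ->cleanup() callback matches the allocation to the right lifetime. The pattern in isolation (my_setup/my_cleanup are illustrative names, chip_data as in the driver):

static int my_setup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	if (!chip) {		/* only allocate on first setup */
		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
	}
	return 0;
}

static void my_cleanup(struct spi_device *spi)
{
	kfree(spi_get_ctldata(spi));
	spi_set_ctldata(spi, NULL);
}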
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 961b97d43b43..fe1b7699fab6 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -823,6 +823,10 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
struct dma_slave_config slave_config = {};
int ret;
+ /* use pio mode for i.mx6dl chip TKT238285 */
+ if (of_machine_is_compatible("fsl,imx6dl"))
+ return 0;
+
/* Prepare for TX DMA: */
master->dma_tx = dma_request_slave_channel(dev, "tx");
if (!master->dma_tx) {
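In spi-imx, returning 0 from spi_imx_sdma_init() before any channel is requested leaves master->dma_tx and master->dma_rx NULL, so the DMA path is never taken and transfers on i.MX6DL fall back to PIO (erratum TKT238285). A hypothetical can_dma-style check shows why NULL channels are enough to force the fallback:

/* sketch only: with no channels requested, every transfer fails the
 * DMA test and is done by PIO */
static bool my_can_dma(struct spi_master *master, struct spi_device *spi,
		       struct spi_transfer *t)
{
	return master->dma_tx && master->dma_rx;
}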
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 930f6010203e..65d610abe06e 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
return 0;
}
- if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
+ if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
return -EFAULT;
}
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 093535c6217b..120b70d72d79 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -85,23 +85,20 @@ static struct nvec_chip *nvec_power_handle;
static const struct mfd_cell nvec_devices[] = {
{
.name = "nvec-kbd",
- .id = 1,
},
{
.name = "nvec-mouse",
- .id = 1,
},
{
.name = "nvec-power",
- .id = 1,
+ .id = 0,
},
{
.name = "nvec-power",
- .id = 2,
+ .id = 1,
},
{
.name = "nvec-paz00",
- .id = 1,
},
};
@@ -891,7 +888,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
nvec_msg_free(nvec, msg);
}
- ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
+ ret = mfd_add_devices(nvec->dev, 0, nvec_devices,
ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
if (ret)
dev_err(nvec->dev, "error adding subdevices\n");
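mfd_add_devices() derives each child's platform-device id from the base argument plus the cell's .id, and the child is named "<cell name>.<id>". The nvec change above (base 0, explicit ids only on the duplicated nvec-power cell) therefore keeps the two power devices distinct while letting the single-instance cells default to id 0. In general, with made-up cell names:

static const struct mfd_cell cells[] = {
	{ .name = "foo" },		/* .id defaults to 0 -> foo.0 */
	{ .name = "bar", .id = 0 },	/* bar.0 */
	{ .name = "bar", .id = 1 },	/* bar.1 */
};

ret = mfd_add_devices(parent, 0, cells, ARRAY_SIZE(cells), NULL, 0, NULL);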
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index de0c9c9d7091..a6315abe7b7c 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev)
le16_to_cpu(dev->descriptor.idProduct) == 0xbadd))
return 0;
+ /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */
+ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a &&
+ le16_to_cpu(dev->descriptor.idProduct) == 0x0200))
+ return 1;
+
/* NOTE: can't use usb_match_id() since interface caches
* aren't set up yet. this is cut/paste from that code.
*/
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 0ffb4ed0a945..41e510ae8c83 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* Protocol and OTG Electrical Test Device */
+ { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
+ USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+
{ } /* terminating entry must be last */
};
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index ad43c5bc1ef1..02e3e2d4ea56 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -476,13 +476,13 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
u32 gintsts;
irqreturn_t retval = IRQ_NONE;
+ spin_lock(&hsotg->lock);
+
if (!dwc2_is_controller_alive(hsotg)) {
dev_warn(hsotg->dev, "Controller is dead\n");
goto out;
}
- spin_lock(&hsotg->lock);
-
gintsts = dwc2_read_common_intr(hsotg);
if (gintsts & ~GINTSTS_PRTINT)
retval = IRQ_HANDLED;
@@ -515,8 +515,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
}
}
- spin_unlock(&hsotg->lock);
out:
+ spin_unlock(&hsotg->lock);
return retval;
}
EXPORT_SYMBOL_GPL(dwc2_handle_common_intr);
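The dwc2 hunks move the lock acquisition ahead of the controller-alive check, so the check can no longer race against code that takes hsotg->lock and shuts the controller down, and they funnel both the dead-controller and the normal path through a single unlock at out:. The resulting single-exit shape, with illustrative helper names:

static irqreturn_t my_isr(int irq, void *dev)
{
	struct my_hsotg *hs = dev;
	irqreturn_t retval = IRQ_NONE;

	spin_lock(&hs->lock);
	if (!controller_alive(hs))	/* checked under the lock now */
		goto out;

	retval = handle_pending_irqs(hs);
out:
	spin_unlock(&hs->lock);		/* one unlock for every path */
	return retval;
}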
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index ccfdfb24b240..2f9735b35338 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -34,7 +34,7 @@ static struct usb_phy *__usb_find_phy(struct list_head *list,
return phy;
}
- return ERR_PTR(-EPROBE_DEFER);
+ return ERR_PTR(-ENODEV);
}
static struct usb_phy *__usb_find_phy_dev(struct device *dev,
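Returning -ENODEV instead of -EPROBE_DEFER from the PHY lookup pushes the retry decision out to the caller: a consumer that genuinely expects a PHY can still defer on its own, while one on a PHY-less system gets a definitive "not present" rather than deferring forever. A hypothetical caller-side policy (the "usb-phy" property is an assumed binding):

phy = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR(phy)) {
	if (of_find_property(np, "usb-phy", NULL))
		return -EPROBE_DEFER;	/* PHY described but not ready yet */
	phy = NULL;			/* no PHY on this platform */
}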
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 11c7a9676441..d684b4b8108f 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -507,7 +507,7 @@ UNUSUAL_DEV( 0x04e6, 0x000c, 0x0100, 0x0100,
UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999,
"SCM Microsystems",
"eUSB SCSI Adapter (Bus Powered)",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+ USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200,
@@ -1995,6 +1995,13 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
+/* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */
+UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA ),
+
/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
* and Mac USB Dock USB-SCSI */
UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133,
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 6df4357d9ee3..dbc00e56c7f5 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -140,3 +140,10 @@ UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
"External HDD",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
+
+/* Reported-by: Richard Henderson <rth@redhat.com> */
+UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
+ "SimpleTech",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e022cc40303d..8dccca9013ed 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -528,9 +528,9 @@ static void handle_rx(struct vhost_net *net)
.msg_controllen = 0,
.msg_flags = MSG_DONTWAIT,
};
- struct virtio_net_hdr hdr = {
- .flags = 0,
- .gso_type = VIRTIO_NET_HDR_GSO_NONE
+ struct virtio_net_hdr_mrg_rxbuf hdr = {
+ .hdr.flags = 0,
+ .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
};
size_t total_len = 0;
int err, mergeable;
@@ -614,11 +614,11 @@ static void handle_rx(struct vhost_net *net)
vq->iov->iov_base);
break;
}
- /* Supply (or replace) ->num_buffers if VIRTIO_NET_F_MRG_RXBUF
- * TODO: Should check and handle checksum.
- */
+ /* TODO: Should check and handle checksum. */
+
+ hdr.num_buffers = cpu_to_vhost16(vq, headcount);
if (likely(mergeable) &&
- copy_to_iter(&headcount, 2, &fixup) != 2) {
+ copy_to_iter(&hdr.num_buffers, 2, &fixup) != 2) {
vq_err(vq, "Failed num_buffers write");
vhost_discard_vq_desc(vq, headcount);
break;
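The vhost hunks switch handle_rx() to virtio_net_hdr_mrg_rxbuf so that num_buffers is written through cpu_to_vhost16(), which selects the byte order the ring actually uses (legacy big-endian guests versus little-endian VIRTIO 1.0), instead of copying the host-endian headcount raw. The essential fix, isolated:

struct virtio_net_hdr_mrg_rxbuf hdr = {
	.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE,
};

hdr.num_buffers = cpu_to_vhost16(vq, headcount);	/* ring byte order */
if (copy_to_iter(&hdr.num_buffers, sizeof(hdr.num_buffers), &fixup)
		!= sizeof(hdr.num_buffers))
	vq_err(vq, "Failed num_buffers write");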