From 7807e086a2d1f69cc1a57958cac04fea79fc2112 Mon Sep 17 00:00:00 2001 From: Ladislav Michl Date: Sat, 11 Feb 2017 14:02:49 +0100 Subject: ARM: OMAP2+: gpmc-onenand: propagate error on initialization failure gpmc_probe_onenand_child returns success even on gpmc_onenand_init failure. Fix that. Signed-off-by: Ladislav Michl Acked-by: Roger Quadros Signed-off-by: Tony Lindgren --- drivers/memory/omap-gpmc.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 5457c361ad58..bf0fe0137dfe 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -1947,9 +1947,7 @@ static int gpmc_probe_onenand_child(struct platform_device *pdev, if (!of_property_read_u32(child, "dma-channel", &val)) gpmc_onenand_data->dma_channel = val; - gpmc_onenand_init(gpmc_onenand_data); - - return 0; + return gpmc_onenand_init(gpmc_onenand_data); } #else static int gpmc_probe_onenand_child(struct platform_device *pdev, -- cgit v1.2.3 From 3bec247474469f769af41e8c80d3a100dd97dd76 Mon Sep 17 00:00:00 2001 From: Song Hongyan Date: Wed, 22 Feb 2017 17:17:38 +0800 Subject: iio: hid-sensor-trigger: Change get poll value function order to avoid losing sensor properties after resume from S3 In _hid_sensor_power_state(), when hid_sensor_read_poll_value() is called, all of the sensor's properties are updated with the values read back from the sensor hardware/firmware. In some implementations, the sensor hardware/firmware does a power cycle during S3. In that case, once hid_sensor_read_poll_value() is called after resume, all of the sensor's properties that the driver preserved across S3 are overwritten with default values. If, instead, a set feature call is issued first, the sensor hardware/firmware is restored to its last state. So move the hid_sensor_read_poll_value() call to after the sensor_hub_set_feature() calls to avoid losing the sensor properties. Signed-off-by: Song Hongyan Acked-by: Srinivas Pandruvada Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/common/hid-sensors/hid-sensor-trigger.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index a3cce3a38300..ecf592d69043 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c @@ -51,8 +51,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) st->report_state.report_id, st->report_state.index, HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM); - - poll_value = hid_sensor_read_poll_value(st); } else { int val; @@ -89,7 +87,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) sensor_hub_get_feature(st->hsdev, st->power_state.report_id, st->power_state.index, sizeof(state_val), &state_val); - if (state && poll_value) + if (state) + poll_value = hid_sensor_read_poll_value(st); + if (poll_value > 0) msleep_interruptible(poll_value * 2); return 0; -- cgit v1.2.3 From 3ff861f59f6c1f5bf2bc03d2cd36ac3f992cbc06 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Wed, 1 Mar 2017 15:37:57 -0800 Subject: iio: magnetometer: ak8974: remove incorrect __exit markups Even if the bus is not hot-pluggable, devices can be unbound from the driver via sysfs, so we should not be using __exit annotations on remove() methods.
The only exception is drivers registered with platform_driver_probe(), which specifically disables sysfs bind/unbind attributes. Signed-off-by: Dmitry Torokhov Reviewed-by: Linus Walleij Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/magnetometer/ak8974.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c index 6dd8cbd7ce95..e13370dc9b1c 100644 --- a/drivers/iio/magnetometer/ak8974.c +++ b/drivers/iio/magnetometer/ak8974.c @@ -763,7 +763,7 @@ power_off: return ret; } -static int __exit ak8974_remove(struct i2c_client *i2c) +static int ak8974_remove(struct i2c_client *i2c) { struct iio_dev *indio_dev = i2c_get_clientdata(i2c); struct ak8974 *ak8974 = iio_priv(indio_dev); @@ -845,7 +845,7 @@ static struct i2c_driver ak8974_driver = { .of_match_table = of_match_ptr(ak8974_of_match), }, .probe = ak8974_probe, - .remove = __exit_p(ak8974_remove), + .remove = ak8974_remove, .id_table = ak8974_id, }; module_i2c_driver(ak8974_driver); -- cgit v1.2.3 From ac8616e4c81dded650dfade49a7da283565d37ce Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Tue, 14 Feb 2017 11:35:22 +0800 Subject: clk: sunxi-ng: mp: Adjust parent rate for pre-dividers The MP style clocks support a mux with pre-dividers. While the driver correctly accounted for them in the .determine_rate callback, it did not in the .recalc_rate and .set_rate callbacks. This means when calculating the factors in the .set_rate callback, they would be off by a factor of the active pre-divider. Same goes for reading back the clock rate after it is set. Cc: stable@vger.kernel.org Fixes: 2ab836db5097 ("clk: sunxi-ng: Add M-P factor clock support") Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard --- drivers/clk/sunxi-ng/ccu_mp.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c index 22c2ca7a2a22..b583f186a804 100644 --- a/drivers/clk/sunxi-ng/ccu_mp.c +++ b/drivers/clk/sunxi-ng/ccu_mp.c @@ -85,6 +85,10 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw, unsigned int m, p; u32 reg; + /* Adjust parent_rate according to pre-dividers */ + ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux, + -1, &parent_rate); + reg = readl(cmp->common.base + cmp->common.reg); m = reg >> cmp->m.shift; @@ -117,6 +121,10 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate, unsigned int m, p; u32 reg; + /* Adjust parent_rate according to pre-dividers */ + ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux, + -1, &parent_rate); + max_m = cmp->m.max ?: 1 << cmp->m.width; max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1); -- cgit v1.2.3 From 69c9ae5041309553472ee798d7683be31bcdc434 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 14 Feb 2017 22:29:45 +0100 Subject: clk: sunxi: ccu-sun5i needs nkmp A randconfig build ran into this rare link error: drivers/clk/sunxi-ng/ccu-sun5i.o:(.data.__compound_literal.1+0x4): undefined reference to `ccu_nkmp_ops' drivers/clk/sunxi-ng/ccu-sun5i.o:(.data.__compound_literal.7+0x4): undefined reference to `ccu_nkmp_ops' This adds the missing 'select'.
Fixes: 5e73761786d6 ("clk: sunxi-ng: Add sun5i CCU driver") Signed-off-by: Arnd Bergmann Signed-off-by: Maxime Ripard --- drivers/clk/sunxi-ng/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig index 695bbf9ef428..72109d2cf41b 100644 --- a/drivers/clk/sunxi-ng/Kconfig +++ b/drivers/clk/sunxi-ng/Kconfig @@ -80,6 +80,7 @@ config SUN6I_A31_CCU select SUNXI_CCU_DIV select SUNXI_CCU_NK select SUNXI_CCU_NKM + select SUNXI_CCU_NKMP select SUNXI_CCU_NM select SUNXI_CCU_MP select SUNXI_CCU_PHASE -- cgit v1.2.3 From 9ad0bb39fce319d7b92c17d306ed0a9f70a02e7d Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Tue, 14 Feb 2017 10:23:32 +0800 Subject: clk: sunxi-ng: sun6i: Fix enable bit offset for hdmi-ddc module clock The enable bit offset for the hdmi-ddc module clock is wrong. It is pointing to the main hdmi module clock enable bit. Reported-by: Bob Ham Fixes: c6e6c96d8fa6 ("clk: sunxi-ng: Add A31/A31s clocks") Cc: stable@vger.kernel.org # 4.9.x- Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard --- drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 4c9a920ff4ab..89e68d29bf45 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents, 0x150, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT); -static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(31), 0); +static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0); static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0); -- cgit v1.2.3 From 9ce9f7999741f342eeffd036ab09213a2fa93040 Mon Sep 17 00:00:00 2001 From: Thor Thayer Date: Mon, 13 Feb 2017 13:49:58 -0600 Subject: gpio: altera-a10sr: Set gpio_chip parent property Set the gpio_chip parent property since some recent functions such as devprop_gpiochip_set_names() can use it. Signed-off-by: Thor Thayer Signed-off-by: Linus Walleij --- drivers/gpio/gpio-altera-a10sr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c index 9e1a138fed53..16a8951b2bed 100644 --- a/drivers/gpio/gpio-altera-a10sr.c +++ b/drivers/gpio/gpio-altera-a10sr.c @@ -96,7 +96,7 @@ static int altr_a10sr_gpio_probe(struct platform_device *pdev) gpio->regmap = a10sr->regmap; gpio->gp = altr_a10sr_gc; - + gpio->gp.parent = pdev->dev.parent; gpio->gp.of_node = pdev->dev.of_node; ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio); -- cgit v1.2.3 From fa6256db033067b57318decdc1c583512a1f8f68 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 15 Feb 2017 02:02:06 +0300 Subject: gpio: mockup: return -EFAULT if copy_from_user() fails copy_from_user() returns the number of bytes remaining to be copied, but we want to return negative error codes on failure.
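For reference, a minimal hypothetical sketch of the convention (not part of this patch): copy_from_user() returns the number of bytes it could not copy, so a caller turns any non-zero remainder into -EFAULT rather than returning it directly.

#include <linux/fs.h>
#include <linux/uaccess.h>

/* Hypothetical write handler, illustrating only the return convention. */
static ssize_t example_write(struct file *file, const char __user *usr_buf,
			     size_t count, loff_t *ppos)
{
	char buf;

	if (copy_from_user(&buf, usr_buf, 1))
		return -EFAULT;	/* "bytes not copied" is not an errno */

	return count;
}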
Fixes: 9202ba2397d1 ("gpio: mockup: implement event injecting over debugfs") Signed-off-by: Dan Carpenter Acked-by: Bartosz Golaszewski Signed-off-by: Linus Walleij --- drivers/gpio/gpio-mockup.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 06dac72cb69c..d99338689213 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -197,7 +197,7 @@ static ssize_t gpio_mockup_event_write(struct file *file, struct seq_file *sfile; struct gpio_desc *desc; struct gpio_chip *gc; - int status, val; + int val; char buf; sfile = file->private_data; @@ -206,9 +206,8 @@ static ssize_t gpio_mockup_event_write(struct file *file, chip = priv->chip; gc = &chip->gc; - status = copy_from_user(&buf, usr_buf, 1); - if (status) - return status; + if (copy_from_user(&buf, usr_buf, 1)) + return -EFAULT; if (buf == '0') val = 0; -- cgit v1.2.3 From b115bebc07f282067eccc06fd5aa3060ab1426da Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 17 Feb 2017 16:13:44 +0100 Subject: gpio: xgene: mark PM functions as __maybe_unused When CONFIG_PM_SLEEP is disabled, we get a warning about unused functions: drivers/gpio/gpio-xgene.c:155:12: warning: 'xgene_gpio_resume' defined but not used [-Wunused-function] static int xgene_gpio_resume(struct device *dev) ^~~~~~~~~~~~~~~~~ drivers/gpio/gpio-xgene.c:142:12: warning: 'xgene_gpio_suspend' defined but not used [-Wunused-function] static int xgene_gpio_suspend(struct device *dev) The warnings are harmless and can be avoided by simplifying the code and marking the functions as __maybe_unused. Signed-off-by: Arnd Bergmann Signed-off-by: Linus Walleij --- drivers/gpio/gpio-xgene.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c index 40a8881c2ce8..f1c6ec17b90a 100644 --- a/drivers/gpio/gpio-xgene.c +++ b/drivers/gpio/gpio-xgene.c @@ -42,9 +42,7 @@ struct xgene_gpio { struct gpio_chip chip; void __iomem *base; spinlock_t lock; -#ifdef CONFIG_PM u32 set_dr_val[XGENE_MAX_GPIO_BANKS]; -#endif }; static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset) @@ -138,8 +136,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc, return 0; } -#ifdef CONFIG_PM -static int xgene_gpio_suspend(struct device *dev) +static __maybe_unused int xgene_gpio_suspend(struct device *dev) { struct xgene_gpio *gpio = dev_get_drvdata(dev); unsigned long bank_offset; @@ -152,7 +149,7 @@ static int xgene_gpio_suspend(struct device *dev) return 0; } -static int xgene_gpio_resume(struct device *dev) +static __maybe_unused int xgene_gpio_resume(struct device *dev) { struct xgene_gpio *gpio = dev_get_drvdata(dev); unsigned long bank_offset; @@ -166,10 +163,6 @@ static int xgene_gpio_resume(struct device *dev) } static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume); -#define XGENE_GPIO_PM_OPS (&xgene_gpio_pm) -#else -#define XGENE_GPIO_PM_OPS NULL -#endif static int xgene_gpio_probe(struct platform_device *pdev) { @@ -241,7 +234,7 @@ static struct platform_driver xgene_gpio_driver = { .name = "xgene-gpio", .of_match_table = xgene_gpio_of_match, .acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match), - .pm = XGENE_GPIO_PM_OPS, + .pm = &xgene_gpio_pm, }, .probe = xgene_gpio_probe, }; -- cgit v1.2.3 From f759921cfbf4847319d197a6ed7c9534d593f8bc Mon Sep 17 00:00:00 2001 From: Phil Reid Date: Mon, 20 Feb 2017 09:41:45 +0800 Subject: gpio: altera: Use 
handle_level_irq when configured as a level_high When a threaded irq handler is chained to one of the gpio pins configured for a level irq, altera_gpio_irq_leveL_high_handler() does not mask the interrupt while it is being handled by the chained irq. As a result, the threaded irq does not get enough cycles to complete quickly enough before the irq is disabled as faulty. handle_level_irq should be used in this situation instead of handle_simple_irq. In gpiochip_irqchip_add, set the default handler to handle_bad_irq as per Documentation/gpio/driver.txt, then set the correct handler in the set_type callback. Signed-off-by: Phil Reid Signed-off-by: Linus Walleij --- drivers/gpio/gpio-altera.c | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c index 5bddbd507ca9..3fe6a21e05a5 100644 --- a/drivers/gpio/gpio-altera.c +++ b/drivers/gpio/gpio-altera.c @@ -90,21 +90,18 @@ static int altera_gpio_irq_set_type(struct irq_data *d, altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d)); - if (type == IRQ_TYPE_NONE) + if (type == IRQ_TYPE_NONE) { + irq_set_handler_locked(d, handle_bad_irq); return 0; - if (type == IRQ_TYPE_LEVEL_HIGH && - altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH) - return 0; - if (type == IRQ_TYPE_EDGE_RISING && - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING) - return 0; - if (type == IRQ_TYPE_EDGE_FALLING && - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING) - return 0; - if (type == IRQ_TYPE_EDGE_BOTH && - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH) + } + if (type == altera_gc->interrupt_trigger) { + if (type == IRQ_TYPE_LEVEL_HIGH) + irq_set_handler_locked(d, handle_level_irq); + else + irq_set_handler_locked(d, handle_simple_irq); return 0; - + } + irq_set_handler_locked(d, handle_bad_irq); return -EINVAL; } @@ -230,7 +227,6 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); } - static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc) { struct altera_gpio_chip *altera_gc; @@ -310,7 +306,7 @@ static int altera_gpio_probe(struct platform_device *pdev) altera_gc->interrupt_trigger = reg; ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0, - handle_simple_irq, IRQ_TYPE_NONE); + handle_bad_irq, IRQ_TYPE_NONE); if (ret) { dev_err(&pdev->dev, "could not add irqchip\n"); -- cgit v1.2.3 From f2f10b7e722a75c6d75a7f7cd06b0eee3ae20f7c Mon Sep 17 00:00:00 2001 From: Daniel Drake Date: Fri, 17 Feb 2017 07:40:52 -0600 Subject: HID: chicony: Add support for another ASUS Zen AiO keyboard Add support for media keys on the keyboard that comes with the Asus V221ID and ZN241IC All In One computers. The keys to support here are WLAN, BRIGHTNESSDOWN and BRIGHTNESSUP. This device is not visibly branded as Chicony, and the USB Vendor ID suggests that it is a JESS device. However this seems like the right place to put it: the usage codes are identical to the currently supported devices, and this driver already supports the ASUS AIO keyboard AK1D.
Signed-off-by: Daniel Drake Signed-off-by: Jiri Kosina --- drivers/hid/Kconfig | 4 ++-- drivers/hid/hid-chicony.c | 1 + drivers/hid/hid-core.c | 1 + drivers/hid/hid-ids.h | 1 + 4 files changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 1aeb80e52424..8eab3200ac9a 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -175,11 +175,11 @@ config HID_CHERRY Support for Cherry Cymotion keyboard. config HID_CHICONY - tristate "Chicony Tactical pad" + tristate "Chicony devices" depends on HID default !EXPERT ---help--- - Support for Chicony Tactical pad. + Support for Chicony Tactical pad and special keys on Chicony keyboards. config HID_CORSAIR tristate "Corsair devices" diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c index bc3cec199fee..f04ed9aabc3f 100644 --- a/drivers/hid/hid-chicony.c +++ b/drivers/hid/hid-chicony.c @@ -86,6 +86,7 @@ static const struct hid_device_id ch_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, + { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, { } }; MODULE_DEVICE_TABLE(hid, ch_devices); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index e9e87d337446..ae01ae601d74 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1910,6 +1910,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, + { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 86c95d30ac80..b3df60da0297 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -557,6 +557,7 @@ #define USB_VENDOR_ID_JESS 0x0c45 #define USB_DEVICE_ID_JESS_YUREX 0x1010 +#define USB_DEVICE_ID_JESS_ZEN_AIO_KBD 0x5112 #define USB_VENDOR_ID_JESS2 0x0f30 #define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111 -- cgit v1.2.3 From a687c5765b5ae19fe559e14615ddc87ebb46d409 Mon Sep 17 00:00:00 2001 From: Roderick Colenbrander Date: Fri, 24 Feb 2017 16:14:15 -0800 Subject: HID: sony: Fix input device leak when connecting a DS4 twice using USB/BT When a user connects a DS4 twice using USB and BT, we reject the second device connection after the setup work. We then perform a cleanup, but during cleanup we are not removing the touchpad device. This leads to leakage of an input device, which we would never remove. It can likely result in a kernel oops as well when the touchpad evdev node is accessed and the underlying HID device has been removed from the system.
[jkosina@suse.cz: added stable annotation] Fixes: ac797b95f532 ("HID: sony: Make the DS4 touchpad a separate device") Cc: stable@vger.kernel.org Signed-off-by: Roderick Colenbrander Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/hid-sony.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index f405b07d0381..740996f9bdd4 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -2632,6 +2632,8 @@ err_stop: sony_leds_remove(sc); if (sc->quirks & SONY_BATTERY_SUPPORT) sony_battery_remove(sc); + if (sc->touchpad) + sony_unregister_touchpad(sc); sony_cancel_work_sync(sc); kfree(sc->output_report_dmabuf); sony_remove_dev_list(sc); -- cgit v1.2.3 From 4bd035eae255a4f1464dca063885fa415e58c12e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 23 Feb 2017 00:26:09 +0000 Subject: EDAC, xgene: Fix wrongly spelled "procesing" Fix spelling mistake in dev_err message. Signed-off-by: Colin Ian King Reviewed-by: Loc Ho Cc: linux-edac Link: http://lkml.kernel.org/r/20170223002609.9440-1-colin.king@canonical.com Signed-off-by: Borislav Petkov --- drivers/edac/xgene_edac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c index 6c270d9d304a..669246056812 100644 --- a/drivers/edac/xgene_edac.c +++ b/drivers/edac/xgene_edac.c @@ -1596,7 +1596,7 @@ static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev) reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS); if (!reg) goto chk_iob_axi0; - dev_err(edac_dev->dev, "IOB procesing agent (PA) transaction error\n"); + dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n"); if (reg & IOBPA_RDATA_CORRUPT_MASK) dev_err(edac_dev->dev, "IOB PA read data RAM error\n"); if (reg & IOBPA_M_RDATA_CORRUPT_MASK) -- cgit v1.2.3 From 253160a8ad06bcc1c1db16a58b1f06d5128f6c5e Mon Sep 17 00:00:00 2001 From: Leonard Crestez Date: Mon, 20 Feb 2017 15:20:56 +0200 Subject: clk: core: Copy connection id Some drivers use sprintf to build clk connection id names but the clk core will save those strings and occasionally print them back. Duplicate the con_id strings instead of fixing all the users. Signed-off-by: Leonard Crestez Signed-off-by: Stephen Boyd --- drivers/clk/clk.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 0fb39fe217d1..67201f67a14a 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2502,7 +2502,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, clk->core = hw->core; clk->dev_id = dev_id; - clk->con_id = con_id; + clk->con_id = kstrdup_const(con_id, GFP_KERNEL); clk->max_rate = ULONG_MAX; clk_prepare_lock(); @@ -2518,6 +2518,7 @@ void __clk_free_clk(struct clk *clk) hlist_del(&clk->clks_node); clk_prepare_unlock(); + kfree_const(clk->con_id); kfree(clk); } -- cgit v1.2.3 From 9b1b23f03abdd25ffde8bbfe5824b89bc0448c28 Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Wed, 1 Mar 2017 22:00:41 +0100 Subject: clk: rockchip: add "," to mux_pll_src_apll_dpll_gpll_usb480m_p on rk3036 The mux_pll_src_apll_dpll_gpll_usb480m_p parent list was missing a "," between the 3rd and 4th parent names, making them fall together and thus lookups fail. Fix that. 
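The underlying C rule, as a small standalone sketch (not kernel code): adjacent string literals are concatenated at compile time, so the missing comma silently merges two parent names into one entry and shortens the array.

#include <stdio.h>

static const char *const with_comma[]    = { "apll", "dpll", "gpll", "usb480m" };
static const char *const missing_comma[] = { "apll", "dpll", "gpll" "usb480m" };

int main(void)
{
	/* Prints "4 vs 3": the broken list has only three entries and its
	 * last entry is the bogus parent name "gpllusb480m". */
	printf("%zu vs %zu\n",
	       sizeof(with_comma) / sizeof(with_comma[0]),
	       sizeof(missing_comma) / sizeof(missing_comma[0]));
	return 0;
}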
Fixes: 5190c08b2989 ("clk: rockchip: add clock controller for rk3036") Signed-off-by: Heiko Stuebner Signed-off-by: Stephen Boyd --- drivers/clk/rockchip/clk-rk3036.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c index 924f560dcf80..dcde70f4c105 100644 --- a/drivers/clk/rockchip/clk-rk3036.c +++ b/drivers/clk/rockchip/clk-rk3036.c @@ -127,7 +127,7 @@ PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr" }; PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" }; PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" }; -PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll" "usb480m" }; +PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" }; PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" }; PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" }; -- cgit v1.2.3 From f8ba2d68e54fbca340ad0fce97397291ba9637bc Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Wed, 1 Mar 2017 22:00:42 +0100 Subject: clk: rockchip: Make uartpll a child of the gpll on rk3036 At boot the shared uart-pll is a child of the apll, which can get changed by cpu frequency scaling. So move it away to the more stable gpll to make sure the uart doesn't break on cpu frequency changes. This turned up during the 4.11 merge-window when commit 6a171b299379 ("serial: 8250_dw: Allow hardware flow control to be used") added general termios enablement making the uart on rk3036 change frequency and thus making it susceptible to the frequency scaling issue. Signed-off-by: Heiko Stuebner Signed-off-by: Stephen Boyd --- drivers/clk/rockchip/clk-rk3036.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c index dcde70f4c105..00d4150e33c3 100644 --- a/drivers/clk/rockchip/clk-rk3036.c +++ b/drivers/clk/rockchip/clk-rk3036.c @@ -450,6 +450,13 @@ static void __init rk3036_clk_init(struct device_node *np) return; } + /* + * Make uart_pll_clk a child of the gpll, as all other sources are + * not that usable / stable. + */ + writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10), + reg_base + RK2928_CLKSEL_CON(13)); + ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); -- cgit v1.2.3 From 45838660e34d90db8d4f7cbc8fd66e8aff79f4fe Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Tue, 7 Mar 2017 09:31:29 -0800 Subject: Input: i8042 - add noloop quirk for Dell Embedded Box PC 3000 The aux port does not get detected without the noloop quirk, so an external PS/2 mouse cannot work as a result. The PS/2 mouse works with this quirk.
BugLink: https://bugs.launchpad.net/bugs/1591053 Signed-off-by: Kai-Heng Feng Reviewed-by: Marcos Paulo de Souza Cc: stable@vger.kernel.org Signed-off-by: Dmitry Torokhov --- drivers/input/serio/i8042-x86ia64io.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 05afd16ea9c9..e856b073d533 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -119,6 +119,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"), }, }, + { + /* Dell Embedded Box PC 3000 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"), + }, + }, { /* OQO Model 01 */ .matches = { -- cgit v1.2.3 From a04e54f2c35823ca32d56afcd5cea5b783e2f51a Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 3 Nov 2016 23:06:53 -0700 Subject: target/pscsi: Fix TYPE_TAPE + TYPE_MEDIUM_CHANGER export The following fixes a divide by zero Oops with TYPE_TAPE due to pscsi_tape_read_blocksize() failing, causing a zero sd->sector_size being propagated up via dev_attrib.hw_block_size. It also fixes another long-standing bug where TYPE_TAPE and TYPE_MEDIUM_CHANGER were using pscsi_create_type_other(), which does not call scsi_device_get() to take the device reference. Instead, rename pscsi_create_type_rom() to pscsi_create_type_nondisk() and use it for all cases. Finally, also drop a dump_stack() in pscsi_get_blocks() for non-TYPE_DISK, which in modern target-core can get invoked via target_sense_desc_format() during CHECK_CONDITION. Reported-by: Malcolm Haak Cc: Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_pscsi.c | 47 ++++++++++---------------------------------- 1 file changed, 12 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index a8f8e53f2f57..44d92f23a3f0 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, buf = kzalloc(12, GFP_KERNEL); if (!buf) - return; + goto out_free; memset(cdb, 0, MAX_COMMAND_SIZE); cdb[0] = MODE_SENSE; @@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, * If MODE_SENSE still returns zero, set the default value to 1024. */ sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); +out_free: if (!sdev->sector_size) sdev->sector_size = 1024; -out_free: kfree(buf); } @@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, sd->lun, sd->queue_depth); } - dev->dev_attrib.hw_block_size = sd->sector_size; + dev->dev_attrib.hw_block_size = + min_not_zero((int)sd->sector_size, 512); dev->dev_attrib.hw_max_sectors = - min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); + min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q)); dev->dev_attrib.hw_queue_depth = sd->queue_depth; /* @@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, /* * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. */ - if (sd->type == TYPE_TAPE) + if (sd->type == TYPE_TAPE) { pscsi_tape_read_blocksize(dev, sd); + dev->dev_attrib.hw_block_size = sd->sector_size; + } return 0; } @@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) /* * Called with struct Scsi_Host->host_lock called.
*/ -static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) +static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd) __releases(sh->host_lock) { struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; @@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) return 0; } -/* - * Called with struct Scsi_Host->host_lock called. - */ -static int pscsi_create_type_other(struct se_device *dev, - struct scsi_device *sd) - __releases(sh->host_lock) -{ - struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; - struct Scsi_Host *sh = sd->host; - int ret; - - spin_unlock_irq(sh->host_lock); - ret = pscsi_add_device_to_list(dev, sd); - if (ret) - return ret; - - pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n", - phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, - sd->channel, sd->id, sd->lun); - return 0; -} - static int pscsi_configure_device(struct se_device *dev) { struct se_hba *hba = dev->se_hba; @@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev) case TYPE_DISK: ret = pscsi_create_type_disk(dev, sd); break; - case TYPE_ROM: - ret = pscsi_create_type_rom(dev, sd); - break; default: - ret = pscsi_create_type_other(dev, sd); + ret = pscsi_create_type_nondisk(dev, sd); break; } @@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev) else if (pdv->pdv_lld_host) scsi_host_put(pdv->pdv_lld_host); - if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) - scsi_device_put(sd); + scsi_device_put(sd); pdv->pdv_sd = NULL; } @@ -1064,7 +1042,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev) if (pdv->pdv_bd && pdv->pdv_bd->bd_part) return pdv->pdv_bd->bd_part->nr_sects; - dump_stack(); return 0; } -- cgit v1.2.3 From 13603685c1f12c67a7a2427f00b63f39a2b6f7c9 Mon Sep 17 00:00:00 2001 From: Max Lohrmann Date: Tue, 7 Mar 2017 22:09:56 -0800 Subject: target: Fix VERIFY_16 handling in sbc_parse_cdb As reported by Max, the Windows 2008 R2 chkdsk utility expects VERIFY_16 to be supported, and does not handle the returned CHECK_CONDITION properly, resulting in an infinite loop. The kernel will log huge amounts of this error: kernel: TARGET_CORE[iSCSI]: Unsupported SCSI Opcode 0x8f, sending CHECK_CONDITION. 
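For context, a hedged sketch of the parsing difference involved (standard SPC/SBC field offsets, not the target-core helpers themselves): VERIFY(10) carries a 32-bit LBA at CDB bytes 2-5 and a 16-bit length at bytes 7-8, while VERIFY(16) carries a 64-bit LBA at bytes 2-9 and a 32-bit length at bytes 10-13.

#include <asm/unaligned.h>
#include <linux/types.h>

/* Illustrative helpers only; target-core uses transport_lba_32/64 and
 * transport_get_sectors_10/16 to read the same fields. */
static u64 verify10_lba(const u8 *cdb) { return get_unaligned_be32(&cdb[2]); }
static u32 verify10_len(const u8 *cdb) { return get_unaligned_be16(&cdb[7]); }
static u64 verify16_lba(const u8 *cdb) { return get_unaligned_be64(&cdb[2]); }
static u32 verify16_len(const u8 *cdb) { return get_unaligned_be32(&cdb[10]); }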
Signed-off-by: Max Lohrmann Cc: Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_sbc.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 68d8aef7ab78..c194063f169b 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -1105,9 +1105,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) return ret; break; case VERIFY: + case VERIFY_16: size = 0; - sectors = transport_get_sectors_10(cdb); - cmd->t_task_lba = transport_lba_32(cdb); + if (cdb[0] == VERIFY) { + sectors = transport_get_sectors_10(cdb); + cmd->t_task_lba = transport_lba_32(cdb); + } else { + sectors = transport_get_sectors_16(cdb); + cmd->t_task_lba = transport_lba_64(cdb); + } cmd->execute_cmd = sbc_emulate_noop; goto check_lba; case REZERO_UNIT: -- cgit v1.2.3 From 67b0503db9c29b04eadfeede6bebbfe5ddad94ef Mon Sep 17 00:00:00 2001 From: Stefan Brüns Date: Sun, 12 Feb 2017 13:02:13 -0200 Subject: [media] dvb-usb-firmware: don't do DMA on stack MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The buffer allocation for the firmware data was changed in commit 43fab9793c1f ("[media] dvb-usb: don't use stack for firmware load") but the same applies for the reset value. Fixes: 43fab9793c1f ("[media] dvb-usb: don't use stack for firmware load") Cc: stable@vger.kernel.org Signed-off-by: Stefan Brüns Signed-off-by: Mauro Carvalho Chehab --- drivers/media/usb/dvb-usb/dvb-usb-firmware.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c index ab9866024ec7..04033efe7ad5 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c @@ -36,16 +36,18 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type) { struct hexline *hx; - u8 reset; - int ret,pos=0; + u8 *buf; + int ret, pos = 0; + u16 cpu_cs_register = cypress[type].cpu_cs_register; - hx = kmalloc(sizeof(*hx), GFP_KERNEL); - if (!hx) + buf = kmalloc(sizeof(*hx), GFP_KERNEL); + if (!buf) return -ENOMEM; + hx = (struct hexline *)buf; /* stop the CPU */ - reset = 1; - if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1) + buf[0] = 1; + if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) err("could not stop the USB controller CPU."); while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) { @@ -61,21 +63,21 @@ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw } if (ret < 0) { err("firmware download failed at %d with %d",pos,ret); - kfree(hx); + kfree(buf); return ret; } if (ret == 0) { /* restart the CPU */ - reset = 0; - if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) { + buf[0] = 0; + if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } } else ret = -EIO; - kfree(hx); + kfree(buf); return ret; } -- cgit v1.2.3 From e61555c29c28a4a3b6ba6207f4a0883ee236004d Mon Sep 17 00:00:00 2001 From: Jérémy Lefaure Date: Wed, 8 Mar 2017 20:18:09 -0500 Subject: EDAC, i5000, i5400: Fix use of MTR_DRAM_WIDTH macro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit The MTR_DRAM_WIDTH macro returns the data width. It is sometimes used as if it returned a boolean: true if the width is 8. Fix the tests where MTR_DRAM_WIDTH is misused. Signed-off-by: Jérémy Lefaure Cc: linux-edac Link: http://lkml.kernel.org/r/20170309011809.8340-1-jeremy.lefaure@lse.epita.fr Signed-off-by: Borislav Petkov --- drivers/edac/i5000_edac.c | 2 +- drivers/edac/i5400_edac.c | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c index 1670d27bcac8..f683919981b0 100644 --- a/drivers/edac/i5000_edac.c +++ b/drivers/edac/i5000_edac.c @@ -1293,7 +1293,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci) dimm->mtype = MEM_FB_DDR2; /* ask what device type on this row */ - if (MTR_DRAM_WIDTH(mtr)) + if (MTR_DRAM_WIDTH(mtr) == 8) dimm->dtype = DEV_X8; else dimm->dtype = DEV_X4; diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c index abf6ef22e220..37a9ba71da44 100644 --- a/drivers/edac/i5400_edac.c +++ b/drivers/edac/i5400_edac.c @@ -1207,13 +1207,14 @@ static int i5400_init_dimms(struct mem_ctl_info *mci) dimm->nr_pages = size_mb << 8; dimm->grain = 8; - dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4; + dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ? + DEV_X8 : DEV_X4; dimm->mtype = MEM_FB_DDR2; /* * The eccc mechanism is SDDC (aka SECC), with * is similar to Chipkill. */ - dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ? + dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ? EDAC_S8ECD8ED : EDAC_S4ECD4ED; ndimms++; } -- cgit v1.2.3 From 89cf83d4e065ff9fbd2ddc674489c8058eeca758 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 16 Feb 2017 12:54:41 +0000 Subject: drm/i915: Squelch any ktime/jiffie rounding errors for wait-ioctl We wait upon jiffies, but report the time elapsed using a high-resolution timer. This discrepancy can lead to us timing out the wait prior to us reporting the elapsed time as complete. This restores the squelching lost in commit e95433c73a11 ("drm/i915: Rearrange i915_wait_request() accounting with callers"). Fixes: e95433c73a11 ("drm/i915: Rearrange i915_wait_request() accounting with callers") Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Joonas Lahtinen Cc: # v4.10-rc1+ Cc: stable@vger.kernel.org Link: http://patchwork.freedesktop.org/patch/msgid/20170216125441.30923-1-chris@chris-wilson.co.uk Reviewed-by: Joonas Lahtinen (cherry picked from commit c1d2061b28c2aa25ec39b60d9c248e6beebd7315) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_gem.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6908123162d1..c45af09555dc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3029,6 +3029,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); if (args->timeout_ns < 0) args->timeout_ns = 0; + + /* + * Apparently ktime isn't accurate enough and occasionally has a + * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch + * things up to make the test happy. We allow up to 1 jiffy. + * + * This is a regression from the timespec->ktime conversion.
+ */ + if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) + args->timeout_ns = 0; } i915_gem_object_put(obj); -- cgit v1.2.3 From 1d972d6021a1388021df51a58248e68372ce2b5d Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Thu, 23 Feb 2017 09:15:57 +0200 Subject: drm/i915/glk: Fix watermark computations for third sprite plane MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Geminilake has a third sprite plane (or fourth universal plane) that is independent from the cursor. Make sure that for_each_plane_id_on_crtc() is aware of that extra plane so that the watermark code takes it into account. Fixes: e9c9882556fc ("drm/i915/glk: Configure number of sprite planes properly") Cc: Ander Conselvan de Oliveira Cc: Rodrigo Vivi Cc: Daniel Vetter Cc: Jani Nikula Cc: intel-gfx@lists.freedesktop.org Cc: Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170223071600.14356-2-ander.conselvan.de.oliveira@intel.com (cherry picked from commit 19c3164db457e0fc65d4501fd354506228576241) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_drv.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0a4b42d31391..7febe6eecf72 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -293,6 +293,7 @@ enum plane_id { PLANE_PRIMARY, PLANE_SPRITE0, PLANE_SPRITE1, + PLANE_SPRITE2, PLANE_CURSOR, I915_MAX_PLANES, }; -- cgit v1.2.3 From b717a0392530ae8da0da041abe5c3a6098b55660 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 24 Feb 2017 11:43:06 +0000 Subject: drm/i915/fbdev: Stop repeating tile configuration on stagnation If we cease making progress in finding matching outputs for a tiled configuration, stop looping over the remaining unconfigured outputs. v2: Use conn_seq (instead of pass) to only apply tile configuration on first pass. Fixes: b0ee9e7fa5b4 ("drm/fb: add support for tiled monitor configurations. 
(v2)") Signed-off-by: Chris Wilson Cc: Tomasz Lis Cc: Dave Airlie Cc: Daniel Vetter Cc: Jani Nikula Cc: Sean Paul Cc: # v3.19+ Reviewed-by: Tomasz Lis Link: http://patchwork.freedesktop.org/patch/msgid/20170224114306.4400-1-chris@chris-wilson.co.uk (cherry picked from commit 754a76591b12c88f57ad8b4ca533a5c9566a1922) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_fbdev.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 1b8ba2e77539..2d449fb5d1d2 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, bool *enabled, int width, int height) { struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); - unsigned long conn_configured, mask; + unsigned long conn_configured, conn_seq, mask; unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); int i, j; bool *save_enabled; bool fallback = true; int num_connectors_enabled = 0; int num_connectors_detected = 0; - int pass = 0; save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL); if (!save_enabled) @@ -374,6 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, mask = BIT(count) - 1; conn_configured = 0; retry: + conn_seq = conn_configured; for (i = 0; i < count; i++) { struct drm_fb_helper_connector *fb_conn; struct drm_connector *connector; @@ -387,7 +387,7 @@ retry: if (conn_configured & BIT(i)) continue; - if (pass == 0 && !connector->has_tile) + if (conn_seq == 0 && !connector->has_tile) continue; if (connector->status == connector_status_connected) @@ -498,10 +498,8 @@ retry: conn_configured |= BIT(i); } - if ((conn_configured & mask) != mask) { - pass++; + if ((conn_configured & mask) != mask && conn_configured != conn_seq) goto retry; - } /* * If the BIOS didn't enable everything it could, fall back to have the -- cgit v1.2.3 From 8c9923707f30ff56d9fd242053594b18f38d8036 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 27 Feb 2017 12:26:54 +0000 Subject: drm/i915: Remove the vma from the drm_mm if binding fails As we track whether a vma has been inserted into the drm_mm using the vma->flags, if we fail to bind the vma into the GTT we do not update those bits and will attempt to reinsert the vma into the drm_mm on future passes. To prevent that, we want to unwind i915_vma_insert() if we fail in our attempt to bind. 
Fixes: 59bfa1248e22 ("drm/i915: Start passing around i915_vma from execbuffer") Testcase: igt/drv_selftest/live_gtt Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Joonas Lahtinen Cc: # v4.9+ Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20170227122654.27651-3-chris@chris-wilson.co.uk (cherry picked from commit 31c7effa39f21f0fea1b3250ae9ff32b9c7e1ae5) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_vma.c | 57 ++++++++++++++++++++++++++--------------- 1 file changed, 37 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 155906e84812..df20e9bc1c0f 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -512,10 +512,36 @@ err_unpin: return ret; } +static void +i915_vma_remove(struct i915_vma *vma) +{ + struct drm_i915_gem_object *obj = vma->obj; + + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); + + drm_mm_remove_node(&vma->node); + list_move_tail(&vma->vm_link, &vma->vm->unbound_list); + + /* Since the unbound list is global, only move to that list if + * no more VMAs exist. + */ + if (--obj->bind_count == 0) + list_move_tail(&obj->global_link, + &to_i915(obj->base.dev)->mm.unbound_list); + + /* And finally now the object is completely decoupled from this vma, + * we can drop its hold on the backing storage and allow it to be + * reaped by the shrinker. + */ + i915_gem_object_unpin_pages(obj); + GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); +} + int __i915_vma_do_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) { - unsigned int bound = vma->flags; + const unsigned int bound = vma->flags; int ret; lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); @@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma, if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { ret = -EBUSY; - goto err; + goto err_unpin; } if ((bound & I915_VMA_BIND_MASK) == 0) { ret = i915_vma_insert(vma, size, alignment, flags); if (ret) - goto err; + goto err_unpin; } ret = i915_vma_bind(vma, vma->obj->cache_level, flags); if (ret) - goto err; + goto err_remove; if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) __i915_vma_set_map_and_fenceable(vma); @@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma, GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); return 0; -err: +err_remove: + if ((bound & I915_VMA_BIND_MASK) == 0) { + GEM_BUG_ON(vma->pages); + i915_vma_remove(vma); + } +err_unpin: __i915_vma_unpin(vma); return ret; } @@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma) } vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); - drm_mm_remove_node(&vma->node); - list_move_tail(&vma->vm_link, &vma->vm->unbound_list); - if (vma->pages != obj->mm.pages) { GEM_BUG_ON(!vma->pages); sg_free_table(vma->pages); @@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma) } vma->pages = NULL; - /* Since the unbound list is global, only move to that list if - * no more VMAs exist. */ - if (--obj->bind_count == 0) - list_move_tail(&obj->global_link, - &to_i915(obj->base.dev)->mm.unbound_list); - - /* And finally now the object is completely decoupled from this vma, - * we can drop its hold on the backing storage and allow it to be - * reaped by the shrinker. 
*/ - i915_gem_object_unpin_pages(obj); - GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); + i915_vma_remove(vma); destroy: if (unlikely(i915_vma_is_closed(vma))) -- cgit v1.2.3 From 34dc8993eef63681b062871413a9484008a2a78f Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Wed, 15 Feb 2017 15:52:59 +0200 Subject: drm/i915: Avoid tweaking evaluation thresholds on Baytrail v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Certain Baytrails, namely the 4 cpu core variants, have been plagued by spurious system hangs, mostly occurring with light loads. Multiple bisects by various people point to a commit which changes the reclocking strategy for Baytrail to follow its bigger brethren: commit 8fb55197e64d ("drm/i915: Agressive downclocking on Baytrail") There is also a review comment attached to this commit from Deepak S on avoiding punit access on Cherryview, and thus it was excluded from the common reclocking path. By taking the same approach and omitting the punit access by not tweaking the thresholds when the hardware has been asked to move to a different frequency, considerable gains in stability have been observed. With a J1900 box, a light render/video load would usually end up in a system hang in less than 12 hours. With this patch applied, the cumulative uptime has now been 34 days without issues. To provoke system hang, light loads on both render and bsd engines in parallel have been used: glxgears >/dev/null 2>/dev/null & mpv --vo=vaapi --hwdec=vaapi --loop=inf vid.mp4 So far, author has not witnessed system hang with above load and this patch applied. Reports from the tenacious people at kernel bugzilla are also promising. Considering that the punit access frequency with this patch is considerably less, there is a possibility that this will push the, still unknown, root cause past the triggering point on most loads. But as we now can reliably reproduce the hang independently, we can reduce the pain that users are having and use static thresholds until a root cause is found. v3: don't break debugfs and simplification (Chris Wilson) References: https://bugzilla.kernel.org/show_bug.cgi?id=109051 Cc: Chris Wilson Cc: Ville Syrjälä Cc: Len Brown Cc: Daniel Vetter Cc: Jani Nikula Cc: fritsch@xbmc.org Cc: miku@iki.fi Cc: Ezequiel Garcia CC: Michal Feix Cc: Hans de Goede Cc: Deepak S Cc: Jarkko Nikula Cc: # v4.2+ Acked-by: Daniel Vetter Acked-by: Chris Wilson Signed-off-by: Mika Kuoppala Link: http://patchwork.freedesktop.org/patch/msgid/1487166779-26945-1-git-send-email-mika.kuoppala@intel.com (cherry picked from commit 6067a27d1f0184596d51decbac1c1fdc4acb012f) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_pm.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 249623d45be0..65cd4c56c9dd 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) break; } + /* When byt can survive without system hang with dynamic + * sw freq adjustments, this restriction can be lifted.
+ */ + if (IS_VALLEYVIEW(dev_priv)) + goto skip_hw_write; + I915_WRITE(GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(dev_priv, ei_up)); I915_WRITE(GEN6_RP_UP_THRESHOLD, @@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) GEN6_RP_UP_BUSY_AVG | GEN6_RP_DOWN_IDLE_AVG); +skip_hw_write: dev_priv->rps.power = new_power; dev_priv->rps.up_threshold = threshold_up; dev_priv->rps.down_threshold = threshold_down; -- cgit v1.2.3 From d253371c4c2f5fc2d884ef25f64decd7549aff5a Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 24 Feb 2017 16:32:10 +0200 Subject: drm/i915/gen9: Increase PCODE request timeout to 50ms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After commit 2c7d0602c815277f7cb7c932b091288710d8aba7 Author: Imre Deak Date: Mon Dec 5 18:27:37 2016 +0200 drm/i915/gen9: Fix PCODE polling during CDCLK change notification there is still one report of the CDCLK-change request timing out on a KBL machine, see the Reference link. On that machine the maximum time the request took to succeed was 34ms, so increase the timeout to 50ms. v2: - Change timeout from 100 to 50 ms to maintain the current 50 ms limit for atomic waits in the driver. (Chris, Tvrtko) Reference: https://bugs.freedesktop.org/show_bug.cgi?id=99345 Cc: Ville Syrjälä Cc: Chris Wilson Cc: Tvrtko Ursulin Cc: Signed-off-by: Imre Deak Acked-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1487946730-17162-1-git-send-email-imre.deak@intel.com (cherry picked from commit 0129936ddda26afd5d9d207c4e86b2425952579f) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_pm.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 65cd4c56c9dd..940bab22d464 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7923,10 +7923,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox, * @timeout_base_ms: timeout for polling with preemption enabled * * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE - * reports an error or an overall timeout of @timeout_base_ms+10 ms expires. + * reports an error or an overall timeout of @timeout_base_ms+50 ms expires. * The request is acknowledged once the PCODE reply dword equals @reply after * applying @reply_mask. Polling is first attempted with preemption enabled - * for @timeout_base_ms and if this times out for another 10 ms with + * for @timeout_base_ms and if this times out for another 50 ms with * preemption disabled. * * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some @@ -7962,14 +7962,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, * worst case) _and_ PCODE was busy for some reason even after a * (queued) request and @timeout_base_ms delay. As a workaround retry * the poll with preemption disabled to maximize the number of - * requests. Increase the timeout from @timeout_base_ms to 10ms to + * requests. Increase the timeout from @timeout_base_ms to 50ms to * account for interrupts that could reduce the number of these - * requests. + * requests, and for any quirks of the PCODE firmware that delays + * the request completion. 
*/ DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n"); WARN_ON_ONCE(timeout_base_ms > 3); preempt_disable(); - ret = wait_for_atomic(COND, 10); + ret = wait_for_atomic(COND, 50); preempt_enable(); out: -- cgit v1.2.3 From 38230243ef316ac696956d75dc78a22e3aa789b9 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 28 Feb 2017 15:28:47 +0100 Subject: drm/i915: Move updating color management to before vblank evasion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This cannot be done reliably during vblank evasion since the color management registers are not double buffered. The original commit that moved this to always happen during vblank evasion was wrong, so revert to doing it before vblank evasion again. Signed-off-by: Maarten Lankhorst Fixes: 20a34e78f0d7 ("drm/i915: Update color management during vblank evasion.") Cc: stable@vger.kernel.org # v4.7+ Link: http://patchwork.freedesktop.org/patch/msgid/1488292128-14540-1-git-send-email-maarten.lankhorst@linux.intel.com Reviewed-by: Ville Syrjälä (cherry picked from commit 567f0792a6ad11c0c2620944b8eeb777359fb85a) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_display.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 01341670738f..9a8b6a13233d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14946,17 +14946,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, to_intel_atomic_state(old_crtc_state->state); bool modeset = needs_modeset(crtc->state); + if (!modeset && + (intel_cstate->base.color_mgmt_changed || + intel_cstate->update_pipe)) { + intel_color_set_csc(crtc->state); + intel_color_load_luts(crtc->state); + } + /* Perform vblank evasion around commit operation */ intel_pipe_update_start(intel_crtc); if (modeset) goto out; - if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) { - intel_color_set_csc(crtc->state); - intel_color_load_luts(crtc->state); - } - if (intel_cstate->update_pipe) intel_update_pipe_config(intel_crtc, old_intel_cstate); else if (INTEL_GEN(dev_priv) >= 9) -- cgit v1.2.3 From 0d9dc306e15b59bf50db87ebcb1e2248586d4733 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 7 Mar 2017 13:20:31 +0000 Subject: drm/i915: Store a permanent error in obj->mm.pages Once the object has been truncated, it is unrecoverable. To facilitate detection of this state, store the error in obj->mm.pages. This is required for the next patch which should be applied to v4.10 (via stable), so we also need to mark this patch for backporting. In that regard, let's consider this to be a fix/improvement too. v2: Avoid dereferencing the ERR_PTR when freeing the object.
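A minimal sketch of the sentinel pattern being introduced (generic ERR_PTR usage, not the i915 code): the pointer itself carries the error, so consumers must check it before dereferencing and before freeing.

#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical accessors mirroring the idea: NULL means "not populated",
 * an ERR_PTR() means "permanently lost", anything else is usable. */
static int pages_usable(void *pages)
{
	if (IS_ERR_OR_NULL(pages))
		return pages ? PTR_ERR(pages) : -ENODATA;
	return 0;
}

static void pages_free(void *pages)
{
	if (IS_ERR_OR_NULL(pages))	/* never kfree() an ERR_PTR sentinel */
		return;
	kfree(pages);
}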
Fixes: 1233e2db199d ("drm/i915: Move object backing storage manipulation to its own locking") Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: # v4.10+ Link: http://patchwork.freedesktop.org/patch/msgid/20170307132031.32461-1-chris@chris-wilson.co.uk Reviewed-by: Joonas Lahtinen (cherry picked from commit 4e5462ee843c883790e9609cf560d88960ea4227) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_gem.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c45af09555dc..3591e8656ff9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2119,6 +2119,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj) */ shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); obj->mm.madv = __I915_MADV_PURGED; + obj->mm.pages = ERR_PTR(-EFAULT); } /* Try to discard unwanted pages */ @@ -2218,7 +2219,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, __i915_gem_object_reset_page_iter(obj); - obj->ops->put_pages(obj, pages); + if (!IS_ERR(pages)) + obj->ops->put_pages(obj, pages); + unlock: mutex_unlock(&obj->mm.lock); } @@ -2437,7 +2440,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) if (err) return err; - if (unlikely(!obj->mm.pages)) { + if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { err = ____i915_gem_object_get_pages(obj); if (err) goto unlock; @@ -2515,7 +2518,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, pinned = true; if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { - if (unlikely(!obj->mm.pages)) { + if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { ret = ____i915_gem_object_get_pages(obj); if (ret) goto err_unlock; -- cgit v1.2.3 From 4e6fdafa7ac395ad47a80a0e7b4fd1e11550f862 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 7 Mar 2017 12:03:38 +0000 Subject: drm/i915: Use pagecache write to prepopulate shmemfs from pwrite-ioctl Before we instantiate/pin the backing store for our use, we can prepopulate the shmemfs filp efficiently using a write into the pagecache. We avoid the penalty of instantiating all the pages, important if the user is just writing to a few and never uses the object on the GPU, and using a direct write into shmemfs allows it to avoid the cost of retrieving a page (mostly the clear-before-use, but in theory we could curtail swapin) before it is overwritten. This can be extended later to provide additional specialisation for other backends (other than shmemfs). For now it provides a defense against very large write-only allocations from exhausting all of system memory. v2: Smelling fixes. 
Fixes: fe115628d567 ("drm/i915: Implement pwrite without struct-mutex") References: https://bugs.freedesktop.org/show_bug.cgi?id=99107 Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Joonas Lahtinen Cc: Mika Kuoppala Cc: # v4.10+ Reviewed-by: Joonas Lahtinen Reviewed-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20170307120338.7277-2-chris@chris-wilson.co.uk (cherry picked from commit 7c55e2c5772dcf3cbacd0fa2bcfeefae416b73f7) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_gem.c | 78 ++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_gem_object.h | 3 ++ 2 files changed, 81 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3591e8656ff9..10777da73039 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, trace_i915_gem_object_pwrite(obj, args->offset, args->size); + ret = -ENODEV; + if (obj->ops->pwrite) + ret = obj->ops->pwrite(obj, args); + if (ret != -ENODEV) + goto err; + ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, @@ -2566,6 +2572,75 @@ err_unlock: goto out_unlock; } +static int +i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_pwrite *arg) +{ + struct address_space *mapping = obj->base.filp->f_mapping; + char __user *user_data = u64_to_user_ptr(arg->data_ptr); + u64 remain, offset; + unsigned int pg; + + /* Before we instantiate/pin the backing store for our use, we + * can prepopulate the shmemfs filp efficiently using a write into + * the pagecache. We avoid the penalty of instantiating all the + * pages, important if the user is just writing to a few and never + * uses the object on the GPU, and using a direct write into shmemfs + * allows it to avoid the cost of retrieving a page (either swapin + * or clearing-before-use) before it is overwritten. + */ + if (READ_ONCE(obj->mm.pages)) + return -ENODEV; + + /* Before the pages are instantiated the object is treated as being + * in the CPU domain. The pages will be clflushed as required before + * use, and we can freely write into the pages directly. If userspace + * races pwrite with any other operation; corruption will ensue - + * that is userspace's prerogative! 
+ */ + + remain = arg->size; + offset = arg->offset; + pg = offset_in_page(offset); + + do { + unsigned int len, unwritten; + struct page *page; + void *data, *vaddr; + int err; + + len = PAGE_SIZE - pg; + if (len > remain) + len = remain; + + err = pagecache_write_begin(obj->base.filp, mapping, + offset, len, 0, + &page, &data); + if (err < 0) + return err; + + vaddr = kmap(page); + unwritten = copy_from_user(vaddr + pg, user_data, len); + kunmap(page); + + err = pagecache_write_end(obj->base.filp, mapping, + offset, len, len - unwritten, + page, data); + if (err < 0) + return err; + + if (unwritten) + return -EFAULT; + + remain -= len; + user_data += len; + offset += len; + pg = 0; + } while (remain); + + return 0; +} + static bool ban_context(const struct i915_gem_context *ctx) { return (i915_gem_context_is_bannable(ctx) && @@ -3987,8 +4062,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, static const struct drm_i915_gem_object_ops i915_gem_object_ops = { .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_IS_SHRINKABLE, + .get_pages = i915_gem_object_get_pages_gtt, .put_pages = i915_gem_object_put_pages_gtt, + + .pwrite = i915_gem_object_pwrite_gtt, }; struct drm_i915_gem_object * diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index bf90b07163d1..76b80a0be797 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops { struct sg_table *(*get_pages)(struct drm_i915_gem_object *); void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *); + int (*pwrite)(struct drm_i915_gem_object *, + const struct drm_i915_gem_pwrite *); + int (*dmabuf_export)(struct drm_i915_gem_object *); void (*release)(struct drm_i915_gem_object *); }; -- cgit v1.2.3 From edd06b8353772dca7afcd4640dafa83b521edd55 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 7 Mar 2017 22:54:19 +0200 Subject: drm/i915: Nuke debug messages from the pipe update critical section MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit printks are slow so we should not be doing them from the vblank evade critical section. These could explain why we sometimes seem to blow past our 100 usec deadline. The problem has been there ever since commit bfd16b2a23dc ("drm/i915: Make updating pipe without modeset atomic.") but it may not have been readily visible until commit e1edbd44e23b ("drm/i915: Complain if we take too long under vblank evasion.") increased our chances of noticing it. Cc: stable@vger.kernel.org Cc: Maarten Lankhorst Fixes: bfd16b2a23dc ("drm/i915: Make updating pipe without modeset atomic.") Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170307205419.19447-1-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst (cherry picked from commit c3f8ad57a01a31397e5a0349a226a32f35ddc19c) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_display.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9a8b6a13233d..b3e0cd133b49 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3669,10 +3669,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc, /* drm_atomic_helper_update_legacy_modeset_state might not be called. 
*/ crtc->base.mode = crtc->base.state->mode; - DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n", - old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h, - pipe_config->pipe_src_w, pipe_config->pipe_src_h); - /* * Update pipe size and adjust fitter if needed: the reason for this is * that in compute_mode_changes we check the native mode (not the pfit @@ -4796,23 +4792,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc) struct intel_crtc_scaler_state *scaler_state = &crtc->config->scaler_state; - DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config); - if (crtc->config->pch_pfit.enabled) { int id; - if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) { - DRM_ERROR("Requesting pfit without getting a scaler first\n"); + if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) return; - } id = scaler_state->scaler_id; I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); - - DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id); } } -- cgit v1.2.3 From 5a8cf90d743f2d05433c6109f6c1b9b904b0cdb7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 2 Feb 2017 20:47:41 +0000 Subject: drm/i915: Drain the freed state from the tail of the next commit If we have any residual freed atomic state from earlier commits, flush the freed list after performing the current modeset. This prevents the freed list from ever-growing if userspace manages to starve the kernel threads (i.e. we are never able to run our free state worker and eventually the system may even oom). Fixes: 6f0f02dc56f1 ("drm/i915: Move atomic state free from out of fence release") Testcase: igt/kms_cursor/legacy/all-pipes-single-bo Reported-by: Maarten Lankhorst Signed-off-by: Chris Wilson Cc: Maarten Lankhorst Cc: Joonas Lahtinen Cc: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170202204741.18231-1-chris@chris-wilson.co.uk Reviewed-by: Maarten Lankhorst (cherry picked from commit ba318c61a9719577b6f451c055f364e4116874b2) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_display.c | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b3e0cd133b49..3282b0f4b134 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14369,6 +14369,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state, } while (progress); } +static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) +{ + struct intel_atomic_state *state, *next; + struct llist_node *freed; + + freed = llist_del_all(&dev_priv->atomic_helper.free_list); + llist_for_each_entry_safe(state, next, freed, freed) + drm_atomic_state_put(&state->base); +} + +static void intel_atomic_helper_free_state_worker(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), atomic_helper.free_work); + + intel_atomic_helper_free_state(dev_priv); +} + static void intel_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; @@ -14535,6 +14553,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * can happen also when the device is completely off. 
*/ intel_uncore_arm_unclaimed_mmio_detection(dev_priv); + + intel_atomic_helper_free_state(dev_priv); } static void intel_atomic_commit_work(struct work_struct *work) @@ -16591,18 +16611,6 @@ fail: drm_modeset_acquire_fini(&ctx); } -static void intel_atomic_helper_free_state(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), atomic_helper.free_work); - struct intel_atomic_state *state, *next; - struct llist_node *freed; - - freed = llist_del_all(&dev_priv->atomic_helper.free_list); - llist_for_each_entry_safe(state, next, freed, freed) - drm_atomic_state_put(&state->base); -} - int intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -16623,7 +16631,7 @@ int intel_modeset_init(struct drm_device *dev) dev->mode_config.funcs = &intel_mode_funcs; INIT_WORK(&dev_priv->atomic_helper.free_work, - intel_atomic_helper_free_state); + intel_atomic_helper_free_state_worker); intel_init_quirks(dev); -- cgit v1.2.3 From 9200c6f177638909dbbaded8aeeeccbd48744400 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Thu, 9 Feb 2017 00:30:22 +0100 Subject: Revert "phy: Add USB3 PHY support for Broadcom NSP SoC" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit d7bc1a7d41bf ("phy: Add USB3 PHY support for Broadcom NSP SoC") as we already have driver for this PHY (shared by NS and NSP). It was added in commit e5666281d9ea ("phy: bcm-ns-usb3: new driver for USB 3.0 PHY on Northstar"). Instead of adding separated driver & duplicating code we should work on improving existing (old) one. Thanks to work done by Broadcom we know there is MDIO bus we weren't aware of & we know register names which makes initialization more clear. This is very valuable info and we should work on using it in existing driver afterwards. Acked-by: Jon Mason Signed-off-by: Rafał Miłecki --- drivers/phy/Kconfig | 8 -- drivers/phy/Makefile | 1 - drivers/phy/phy-bcm-nsp-usb3.c | 177 ----------------------------------------- 3 files changed, 186 deletions(-) delete mode 100644 drivers/phy/phy-bcm-nsp-usb3.c (limited to 'drivers') diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index dc5277ad1b5a..c11044439fd4 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -510,12 +510,4 @@ config PHY_MESON8B_USB2 and GXBB SoCs. If unsure, say N. -config PHY_NSP_USB3 - tristate "Broadcom NorthStar plus USB3 PHY driver" - depends on OF && (ARCH_BCM_NSP || COMPILE_TEST) - select GENERIC_PHY - default ARCH_BCM_NSP - help - Enable this to support the Broadcom Northstar plus USB3 PHY. - If unsure, say N. endmenu diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index e7b0feb1e125..dd8f3b5d2918 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -62,4 +62,3 @@ obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o -obj-$(CONFIG_PHY_NSP_USB3) += phy-bcm-nsp-usb3.o diff --git a/drivers/phy/phy-bcm-nsp-usb3.c b/drivers/phy/phy-bcm-nsp-usb3.c deleted file mode 100644 index 49024eaa5545..000000000000 --- a/drivers/phy/phy-bcm-nsp-usb3.c +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (C) 2016 Broadcom - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. 
- * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define NSP_USB3_RST_CTRL_OFFSET 0x3f8 - -/* mdio reg access */ -#define NSP_USB3_PHY_BASE_ADDR_REG 0x1f - -#define NSP_USB3_PHY_PLL30_BLOCK 0x8000 -#define NSP_USB3_PLL_CONTROL 0x01 -#define NSP_USB3_PLLA_CONTROL0 0x0a -#define NSP_USB3_PLLA_CONTROL1 0x0b - -#define NSP_USB3_PHY_TX_PMD_BLOCK 0x8040 -#define NSP_USB3_TX_PMD_CONTROL1 0x01 - -#define NSP_USB3_PHY_PIPE_BLOCK 0x8060 -#define NSP_USB3_LFPS_CMP 0x02 -#define NSP_USB3_LFPS_DEGLITCH 0x03 - -struct nsp_usb3_phy { - struct regmap *usb3_ctrl; - struct phy *phy; - struct mdio_device *mdiodev; -}; - -static int nsp_usb3_phy_init(struct phy *phy) -{ - struct nsp_usb3_phy *iphy = phy_get_drvdata(phy); - struct mii_bus *bus = iphy->mdiodev->bus; - int addr = iphy->mdiodev->addr; - u32 data; - int rc; - - rc = regmap_read(iphy->usb3_ctrl, 0, &data); - if (rc) - return rc; - data |= 1; - rc = regmap_write(iphy->usb3_ctrl, 0, data); - if (rc) - return rc; - - rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 1); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG, - NSP_USB3_PHY_PLL30_BLOCK); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x1000); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL0, 0x6400); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0xc000); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0x8000); - if (rc) - return rc; - - rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 0); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x9000); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG, - NSP_USB3_PHY_PIPE_BLOCK); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_CMP, 0xf30d); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_DEGLITCH, 0x6302); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG, - NSP_USB3_PHY_TX_PMD_BLOCK); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_TX_PMD_CONTROL1, 0x1003); - - return rc; -} - -static struct phy_ops nsp_usb3_phy_ops = { - .init = nsp_usb3_phy_init, - .owner = THIS_MODULE, -}; - -static int nsp_usb3_phy_probe(struct mdio_device *mdiodev) -{ - struct device *dev = &mdiodev->dev; - struct phy_provider *provider; - struct nsp_usb3_phy *iphy; - - iphy = devm_kzalloc(dev, sizeof(*iphy), GFP_KERNEL); - if (!iphy) - return -ENOMEM; - iphy->mdiodev = mdiodev; - - iphy->usb3_ctrl = syscon_regmap_lookup_by_phandle(dev->of_node, - "usb3-ctrl-syscon"); - if (IS_ERR(iphy->usb3_ctrl)) - return PTR_ERR(iphy->usb3_ctrl); - - iphy->phy = devm_phy_create(dev, dev->of_node, &nsp_usb3_phy_ops); - if (IS_ERR(iphy->phy)) { - dev_err(dev, "failed to create PHY\n"); - return PTR_ERR(iphy->phy); - } - - phy_set_drvdata(iphy->phy, iphy); - - provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - if (IS_ERR(provider)) { - dev_err(dev, "could not register PHY provider\n"); - return PTR_ERR(provider); - } - - return 0; -} - -static const struct of_device_id nsp_usb3_phy_of_match[] = { - {.compatible = 
"brcm,nsp-usb3-phy",}, - { /* sentinel */ } -}; - -static struct mdio_driver nsp_usb3_phy_driver = { - .mdiodrv = { - .driver = { - .name = "nsp-usb3-phy", - .of_match_table = nsp_usb3_phy_of_match, - }, - }, - .probe = nsp_usb3_phy_probe, -}; - -mdio_module_driver(nsp_usb3_phy_driver); - -MODULE_DESCRIPTION("Broadcom NSP USB3 PHY driver"); -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Yendapally Reddy Dhananjaya Reddy Date: Wed, 8 Mar 2017 17:22:42 +0900 Subject: phy: phy-exynos-pcie: fix the wrong error return When it doesn't get the blk_base's resource, it was returned the error about phy_base, not blk_base. This patch is for fixing the wrong error return about blk_base. Fixes: cf0adb8e281b ("phy: phy-exynos-pcie: Add support for Exynos PCIe PHY") Signed-off-by: Jaehoon Chung Signed-off-by: Kishon Vijay Abraham I --- drivers/phy/phy-exynos-pcie.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/phy-exynos-pcie.c b/drivers/phy/phy-exynos-pcie.c index 4f60b83641d5..60baf25d98e2 100644 --- a/drivers/phy/phy-exynos-pcie.c +++ b/drivers/phy/phy-exynos-pcie.c @@ -254,8 +254,8 @@ static int exynos_pcie_phy_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 1); exynos_phy->blk_base = devm_ioremap_resource(dev, res); - if (IS_ERR(exynos_phy->phy_base)) - return PTR_ERR(exynos_phy->phy_base); + if (IS_ERR(exynos_phy->blk_base)) + return PTR_ERR(exynos_phy->blk_base); exynos_phy->drv_data = drv_data; -- cgit v1.2.3 From 1a09b6a7c10e22c489a8b212dd6862b1fd9674ad Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 9 Mar 2017 13:45:44 +0530 Subject: phy: qcom-usb-hs: Add depends on EXTCON We get the following compile errors if EXTCON is enabled as a module but this driver is builtin: drivers/built-in.o: In function `qcom_usb_hs_phy_power_off': phy-qcom-usb-hs.c:(.text+0x1089): undefined reference to `extcon_unregister_notifier' drivers/built-in.o: In function `qcom_usb_hs_phy_probe': phy-qcom-usb-hs.c:(.text+0x11b5): undefined reference to `extcon_get_edev_by_phandle' drivers/built-in.o: In function `qcom_usb_hs_phy_power_on': phy-qcom-usb-hs.c:(.text+0x128e): undefined reference to `extcon_get_state' phy-qcom-usb-hs.c:(.text+0x12a9): undefined reference to `extcon_register_notifier' so let's mark this as needing to follow the modular status of the extcon framework. Fixes: 9994a33865f4 e2427b09ba929c2b9 (phy: Add support for Qualcomm's USB HS phy") Signed-off-by: Stephen Boyd Signed-off-by: Kishon Vijay Abraham I --- drivers/phy/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index c11044439fd4..005cadb7a3f8 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -449,6 +449,7 @@ config PHY_QCOM_UFS config PHY_QCOM_USB_HS tristate "Qualcomm USB HS PHY module" depends on USB_ULPI_BUS + depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in select GENERIC_PHY help Support for the USB high-speed ULPI compliant phy on Qualcomm -- cgit v1.2.3 From 6d399783e9d4e9bd44931501948059d24ad96ff8 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 23 Feb 2017 12:26:41 -0800 Subject: md/raid10: submit bio directly to replacement disk Commit 57c67df(md/raid10: submit IO from originating thread instead of md thread) submits bio directly for normal disks but not for replacement disks. There is no point we shouldn't do this for replacement disks. 
Cc: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid10.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 063c43d83b72..1443305613c5 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1477,11 +1477,24 @@ retry_write: mbio->bi_bdev = (void*)rdev; atomic_inc(&r10_bio->remaining); + + cb = blk_check_plugged(raid10_unplug, mddev, + sizeof(*plug)); + if (cb) + plug = container_of(cb, struct raid10_plug_cb, + cb); + else + plug = NULL; spin_lock_irqsave(&conf->device_lock, flags); - bio_list_add(&conf->pending_bio_list, mbio); - conf->pending_count++; + if (plug) { + bio_list_add(&plug->pending, mbio); + plug->pending_cnt++; + } else { + bio_list_add(&conf->pending_bio_list, mbio); + conf->pending_count++; + } spin_unlock_irqrestore(&conf->device_lock, flags); - if (!mddev_check_plugged(mddev)) + if (!plug) md_wakeup_thread(mddev->thread); } } -- cgit v1.2.3 From 99b3d74ec05c4a4c57766a90d65b53d78ab06404 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 23 Feb 2017 12:31:10 -0800 Subject: md: delete dead code Nobody is using mddev_check_plugged(), so delete the dead code Signed-off-by: Shaohua Li --- drivers/md/md.c | 8 -------- drivers/md/md.h | 6 ------ 2 files changed, 14 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 548d1b8014f8..82bd1f3d2b19 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -440,14 +440,6 @@ void md_flush_request(struct mddev *mddev, struct bio *bio) } EXPORT_SYMBOL(md_flush_request); -void md_unplug(struct blk_plug_cb *cb, bool from_schedule) -{ - struct mddev *mddev = cb->data; - md_wakeup_thread(mddev->thread); - kfree(cb); -} -EXPORT_SYMBOL(md_unplug); - static inline struct mddev *mddev_get(struct mddev *mddev) { atomic_inc(&mddev->active); diff --git a/drivers/md/md.h b/drivers/md/md.h index b8859cbf84b6..dde8ecb760c8 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -676,16 +676,10 @@ extern void mddev_resume(struct mddev *mddev); extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, struct mddev *mddev); -extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule); extern void md_reload_sb(struct mddev *mddev, int raid_disk); extern void md_update_sb(struct mddev *mddev, int force); extern void md_kick_rdev_from_array(struct md_rdev * rdev); struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr); -static inline int mddev_check_plugged(struct mddev *mddev) -{ - return !!blk_check_plugged(md_unplug, mddev, - sizeof(struct blk_plug_cb)); -} static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev) { -- cgit v1.2.3 From 9c8043f337f14d1743006dfc59c03e80a42e3884 Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Fri, 24 Feb 2017 11:15:12 +0800 Subject: md-cluster: free md_cluster_info if node leave cluster To avoid memory leak, we need to free the cinfo which is allocated when node join cluster. 
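A compact illustration of the join/leave pairing the fix restores; the cluster_ctx below is a stand-in, not the md-cluster structure itself.

#include <linux/errno.h>
#include <linux/slab.h>

/* Stand-in for the per-array cluster context allocated at join time. */
struct cluster_ctx {
        void *lockspace;
};

static int cluster_join(struct cluster_ctx **out)
{
        struct cluster_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
                return -ENOMEM;
        /* ... create the lockspace, take the cluster locks ... */
        *out = ctx;
        return 0;
}

static int cluster_leave(struct cluster_ctx *ctx)
{
        /* ... free lock resources, release the lockspace ... */
        kfree(ctx);     /* without this, every leave leaks one context */
        return 0;
}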
Reviewed-by: NeilBrown Signed-off-by: Guoqing Jiang Signed-off-by: Shaohua Li --- drivers/md/md-cluster.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 2b13117fb918..ba7edcdd09ce 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -974,6 +974,7 @@ static int leave(struct mddev *mddev) lockres_free(cinfo->bitmap_lockres); unlock_all_bitmaps(mddev); dlm_release_lockspace(cinfo->lockspace, 2); + kfree(cinfo); return 0; } -- cgit v1.2.3 From 75df023f4f2188c21181996e28234fef9351ef45 Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Fri, 24 Feb 2017 11:15:13 +0800 Subject: md-cluster: remove useless memset from gather_all_resync_info This memset is not needed. The lvb is already zeroed because it was recently allocated by lockres_init, which uses kzalloc(), and read_resync_info() doesn't need it to be zero anyway. Reviewed-by: NeilBrown Signed-off-by: Guoqing Jiang Signed-off-by: Shaohua Li --- drivers/md/md-cluster.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index ba7edcdd09ce..321ecac23027 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -777,7 +777,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots) bm_lockres->flags |= DLM_LKF_NOQUEUE; ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW); if (ret == -EAGAIN) { - memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE); s = read_resync_info(mddev, bm_lockres); if (s) { pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n", -- cgit v1.2.3 From c94836342192b05d599d6aa3397f732f7a238689 Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Fri, 24 Feb 2017 11:15:23 +0800 Subject: md: move funcs from pers->resize to update_size raid1_resize and raid5_resize should also check the mddev->queue if run underneath dm-raid. And both set_capacity and revalidate_disk are used in pers->resize such as raid1, raid10 and raid5. So move them from personality file to common code. 
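Condensed, the common-code shape this produces looks roughly like the fragment below; it is a simplified sketch that assumes md's internal struct mddev and is not the exact update_size() code.

/*
 * Fragment assuming md.h's struct mddev, shown only to make the guard
 * explicit: when md runs underneath dm-raid there is no gendisk or
 * request queue owned by md, so mddev->queue is NULL and the block
 * device bookkeeping must be skipped.
 */
static int mddev_apply_resize(struct mddev *mddev, sector_t num_sectors)
{
        int rv = mddev->pers->resize(mddev, num_sectors);

        if (!rv && mddev->queue) {
                set_capacity(mddev->gendisk, mddev->array_sectors);
                revalidate_disk(mddev->gendisk);
        }
        return rv;
}

Doing this once in the caller also removes the copies that raid1, raid10 and raid5 each carried in their ->resize() implementations.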
Reviewed-by: NeilBrown Signed-off-by: Guoqing Jiang Signed-off-by: Shaohua Li --- drivers/md/md.c | 8 ++++++-- drivers/md/raid1.c | 2 -- drivers/md/raid10.c | 4 ---- drivers/md/raid5.c | 2 -- 4 files changed, 6 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 82bd1f3d2b19..bd15a18485c8 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -6525,8 +6525,12 @@ static int update_size(struct mddev *mddev, sector_t num_sectors) return -ENOSPC; } rv = mddev->pers->resize(mddev, num_sectors); - if (!rv) - revalidate_disk(mddev->gendisk); + if (!rv) { + if (mddev->queue) { + set_capacity(mddev->gendisk, mddev->array_sectors); + revalidate_disk(mddev->gendisk); + } + } return rv; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index fbc2d7851b49..10c3865e1186 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -3246,8 +3246,6 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors) return ret; } md_set_array_sectors(mddev, newsize); - set_capacity(mddev->gendisk, mddev->array_sectors); - revalidate_disk(mddev->gendisk); if (sectors > mddev->dev_sectors && mddev->recovery_cp > mddev->dev_sectors) { mddev->recovery_cp = mddev->dev_sectors; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 1443305613c5..c4db6d1fb6a2 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3956,10 +3956,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors) return ret; } md_set_array_sectors(mddev, size); - if (mddev->queue) { - set_capacity(mddev->gendisk, mddev->array_sectors); - revalidate_disk(mddev->gendisk); - } if (sectors > mddev->dev_sectors && mddev->recovery_cp > oldsize) { mddev->recovery_cp = oldsize; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4fb09b3fcb41..6bfedfcf41c1 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7605,8 +7605,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors) return ret; } md_set_array_sectors(mddev, newsize); - set_capacity(mddev->gendisk, mddev->array_sectors); - revalidate_disk(mddev->gendisk); if (sectors > mddev->dev_sectors && mddev->recovery_cp > mddev->dev_sectors) { mddev->recovery_cp = mddev->dev_sectors; -- cgit v1.2.3 From 1b3bae49fba52f1ec499c36c53bc07761a9f6c4d Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 1 Mar 2017 07:31:28 +1100 Subject: md: don't impose the MD_SB_DISKS limit on arrays without metadata. These arrays, created with "mdadm --build" don't benefit from a limit. The default will be used, which is '0' and is interpreted as "don't impose a limit". 
Reported-by: ian_bruce@mail.ru Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/md.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index bd15a18485c8..cd89ad3c3a0d 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -6450,11 +6450,10 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) mddev->layout = info->layout; mddev->chunk_sectors = info->chunk_size >> 9; - mddev->max_disks = MD_SB_DISKS; - if (mddev->persistent) { - mddev->flags = 0; - mddev->sb_flags = 0; + mddev->max_disks = MD_SB_DISKS; + mddev->flags = 0; + mddev->sb_flags = 0; } set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); -- cgit v1.2.3 From 61eb2b43b99ebdc9bc6bc83d9792257b243e7cb3 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Tue, 28 Feb 2017 13:00:20 -0800 Subject: md/raid1/10: fix potential deadlock Neil Brown pointed out a potential deadlock in raid 10 code with bio_split/chain. The raid1 code could have the same issue, but recent barrier rework makes it less likely to happen. The deadlock happens in below sequence: 1. generic_make_request(bio), this will set current->bio_list 2. raid10_make_request will split bio to bio1 and bio2 3. __make_request(bio1), wait_barrer, add underlayer disk bio to current->bio_list 4. __make_request(bio2), wait_barrer If raise_barrier happens between 3 & 4, since wait_barrier runs at 3, raise_barrier waits for IO completion from 3. And since raise_barrier sets barrier, 4 waits for raise_barrier. But IO from 3 can't be dispatched because raid10_make_request() doesn't finished yet. The solution is to adjust the IO ordering. Quotes from Neil: " It is much safer to: if (need to split) { split = bio_split(bio, ...) bio_chain(...) make_request_fn(split); generic_make_request(bio); } else make_request_fn(mddev, bio); This way we first process the initial section of the bio (in 'split') which will queue some requests to the underlying devices. These requests will be queued in generic_make_request. Then we queue the remainder of the bio, which will be added to the end of the generic_make_request queue. Then we return. generic_make_request() will pop the lower-level device requests off the queue and handle them first. Then it will process the remainder of the original bio once the first section has been fully processed. " Note, this only happens in read path. In write path, the bio is flushed to underlaying disks either by blk flush (from schedule) or offladed to raid1/10d. It's queued in current->bio_list. Cc: Coly Li Cc: stable@vger.kernel.org (v3.14+, only the raid10 part) Suggested-by: NeilBrown Reviewed-by: Jack Wang Signed-off-by: Shaohua Li --- drivers/md/raid1.c | 25 +++++++++++++++++++++++-- drivers/md/raid10.c | 18 ++++++++++++++++++ 2 files changed, 41 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 10c3865e1186..c33e96e33b8e 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1587,9 +1587,30 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio) split = bio; } - if (bio_data_dir(split) == READ) + if (bio_data_dir(split) == READ) { raid1_read_request(mddev, split); - else + + /* + * If a bio is splitted, the first part of bio will + * pass barrier but the bio is queued in + * current->bio_list (see generic_make_request). If + * there is a raise_barrier() called here, the second + * part of bio can't pass barrier. 
But since the first + * part bio isn't dispatched to underlaying disks yet, + * the barrier is never released, hence raise_barrier + * will alays wait. We have a deadlock. + * Note, this only happens in read path. For write + * path, the first part of bio is dispatched in a + * schedule() call (because of blk plug) or offloaded + * to raid10d. + * Quitting from the function immediately can change + * the bio order queued in bio_list and avoid the deadlock. + */ + if (split != bio) { + generic_make_request(bio); + break; + } + } else raid1_write_request(mddev, split); } while (split != bio); } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index c4db6d1fb6a2..b1b1f982a722 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1584,7 +1584,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio) split = bio; } + /* + * If a bio is splitted, the first part of bio will pass + * barrier but the bio is queued in current->bio_list (see + * generic_make_request). If there is a raise_barrier() called + * here, the second part of bio can't pass barrier. But since + * the first part bio isn't dispatched to underlaying disks + * yet, the barrier is never released, hence raise_barrier will + * alays wait. We have a deadlock. + * Note, this only happens in read path. For write path, the + * first part of bio is dispatched in a schedule() call + * (because of blk plug) or offloaded to raid10d. + * Quitting from the function immediately can change the bio + * order queued in bio_list and avoid the deadlock. + */ __make_request(mddev, split); + if (split != bio && bio_data_dir(bio) == READ) { + generic_make_request(bio); + break; + } } while (split != bio); /* In case raid10d snuck in to freeze_array */ -- cgit v1.2.3 From 9a8b0a230aca55ee142fd76f4765f1da1799da93 Mon Sep 17 00:00:00 2001 From: Mihail Atanassov Date: Wed, 15 Feb 2017 14:00:15 +0000 Subject: drm: mali-dp: Remove mclk rate management The rate of mclk depends on the use-case. If no downscaling is required, then mclk == pxlclk is a valid option; with downscaling however, the rate at which mclk runs determines how much a plane can be downscaled before composition. This is a system integration + power management issue that is more suited to firmware rather than this driver. Signed-off-by: Mihail Atanassov Signed-off-by: Liviu Dudau --- drivers/gpu/drm/arm/malidp_crtc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index 08e6a71f5d05..294b53697334 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -63,8 +63,7 @@ static void malidp_crtc_enable(struct drm_crtc *crtc) clk_prepare_enable(hwdev->pxlclk); - /* mclk needs to be set to the same or higher rate than pxlclk */ - clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000); + /* We rely on firmware to set mclk to a sensible level. */ clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000); hwdev->modeset(hwdev, &vm); -- cgit v1.2.3 From d1479f6108006555fe33d7cfe8db4f95ad614b9a Mon Sep 17 00:00:00 2001 From: Mihail Atanassov Date: Thu, 9 Feb 2017 11:32:00 +0000 Subject: drm: mali-dp: Fix smart layer not going to composition Use rectangle 1 as a generic plane. Existing code already sets the smart layer bounding box size + offset. 
The rectangles' offsets are relative to the bounding box, so there is no need to set R1's offset (reset value is 0), just its size which is the same as the bounding box. Signed-off-by: Mihail Atanassov Signed-off-by: Liviu Dudau --- drivers/gpu/drm/arm/malidp_hw.c | 2 +- drivers/gpu/drm/arm/malidp_planes.c | 18 ++++++++++++++++-- drivers/gpu/drm/arm/malidp_regs.h | 1 + 3 files changed, 18 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index 488aedf5b58d..9f5513006eee 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -83,7 +83,7 @@ static const struct malidp_layer malidp550_layers[] = { { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE }, { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, - { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 }, + { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE }, }; #define MALIDP_DE_DEFAULT_PREFETCH_START 5 diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 414aada10fe5..d5aec082294c 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -37,6 +37,8 @@ #define LAYER_V_VAL(x) (((x) & 0x1fff) << 16) #define MALIDP_LAYER_COMP_SIZE 0x010 #define MALIDP_LAYER_OFFSET 0x014 +#define MALIDP550_LS_ENABLE 0x01c +#define MALIDP550_LS_R1_IN_SIZE 0x020 /* * This 4-entry look-up-table is used to determine the full 8-bit alpha value @@ -242,6 +244,11 @@ static void malidp_de_plane_update(struct drm_plane *plane, LAYER_V_VAL(plane->state->crtc_y), mp->layer->base + MALIDP_LAYER_OFFSET); + if (mp->layer->id == DE_SMART) + malidp_hw_write(mp->hwdev, + LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h), + mp->layer->base + MALIDP550_LS_R1_IN_SIZE); + /* first clear the rotation bits */ val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL); val &= ~LAYER_ROT_MASK; @@ -330,9 +337,16 @@ int malidp_de_planes_init(struct drm_device *drm) plane->hwdev = malidp->dev; plane->layer = &map->layers[i]; - /* Skip the features which the SMART layer doesn't have */ - if (id == DE_SMART) + if (id == DE_SMART) { + /* + * Enable the first rectangle in the SMART layer to be + * able to use it as a drm plane. + */ + malidp_hw_write(malidp->dev, 1, + plane->layer->base + MALIDP550_LS_ENABLE); + /* Skip the features which the SMART layer doesn't have. */ continue; + } drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags); malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT, diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h index aff6d4a84e99..b816067a65c5 100644 --- a/drivers/gpu/drm/arm/malidp_regs.h +++ b/drivers/gpu/drm/arm/malidp_regs.h @@ -84,6 +84,7 @@ /* Stride register offsets relative to Lx_BASE */ #define MALIDP_DE_LG_STRIDE 0x18 #define MALIDP_DE_LV_STRIDE0 0x18 +#define MALIDP550_DE_LS_R1_STRIDE 0x28 /* macros to set values into registers */ #define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0) -- cgit v1.2.3 From b6573da139ecbee6f2c77392b51266d4521d50ac Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Fri, 10 Mar 2017 10:10:54 -0800 Subject: Input: synaptics-rmi4 - prevent null pointer dereference in f30 If the platform data has f30_data.disable set, f30 in rmi_f30_config() might be null. 
Prevent a kernel oops by checking for non-null f30. Signed-off-by: Benjamin Tissoires Signed-off-by: Dmitry Torokhov --- drivers/input/rmi4/rmi_f30.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c index 3422464af229..b8572b342dcb 100644 --- a/drivers/input/rmi4/rmi_f30.c +++ b/drivers/input/rmi4/rmi_f30.c @@ -170,6 +170,10 @@ static int rmi_f30_config(struct rmi_function *fn) rmi_get_platform_data(fn->rmi_dev); int error; + /* can happen if f30_data.disable is set */ + if (!f30) + return 0; + if (pdata->f30_data.trackstick_buttons) { /* Try [re-]establish link to F03. */ f30->f03 = rmi_find_function(fn->rmi_dev, 0x03); -- cgit v1.2.3 From ed46e66cc1b3d684042f92dfa2ab15ee917b4cac Mon Sep 17 00:00:00 2001 From: Oleksandr Tyshchenko Date: Mon, 27 Feb 2017 14:30:25 +0200 Subject: iommu/io-pgtable-arm: Check for leaf entry before dereferencing it Do a check for already installed leaf entry at the current level before dereferencing it in order to avoid walking the page table down with wrong pointer to the next level. Signed-off-by: Oleksandr Tyshchenko CC: Will Deacon CC: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index feacc54bec68..f9bc6ebb8140 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -335,8 +335,12 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) pte |= ARM_LPAE_PTE_NSTABLE; __arm_lpae_set_pte(ptep, pte, cfg); - } else { + } else if (!iopte_leaf(pte, lvl)) { cptep = iopte_deref(pte, data); + } else { + /* We require an unmap first */ + WARN_ON(!selftest_running); + return -EEXIST; } /* Rinse, repeat */ -- cgit v1.2.3 From a03849e7210277fa212779b7cd9c30e1ab6194b2 Mon Sep 17 00:00:00 2001 From: Oleksandr Tyshchenko Date: Mon, 27 Feb 2017 14:30:26 +0200 Subject: iommu/io-pgtable-arm-v7s: Check for leaf entry before dereferencing it Do a check for already installed leaf entry at the current level before dereferencing it in order to avoid walking the page table down with wrong pointer to the next level. Signed-off-by: Oleksandr Tyshchenko CC: Will Deacon CC: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm-v7s.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 1c049e2e12bf..8d6ca28c3e1f 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -422,8 +422,12 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, pte |= ARM_V7S_ATTR_NS_TABLE; __arm_v7s_set_pte(ptep, pte, 1, cfg); - } else { + } else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) { cptep = iopte_deref(pte, lvl); + } else { + /* We require an unmap first */ + WARN_ON(!selftest_running); + return -EEXIST; } /* Rinse, repeat */ -- cgit v1.2.3 From d8a8ed9758241e138933c67e40db2db2790eca19 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 9 Mar 2017 13:21:07 -0500 Subject: drm/amd/amdgpu: Disable GFX_PG on Carrizo until compute issues solved Currently compute jobs will stall if GFX_PG is enabled. Until this is resolved we'll disable GFX_PG. 
Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 50bdb24ef8d6..4a785d6acfb9 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1051,7 +1051,7 @@ static int vi_common_early_init(void *handle) /* rev0 hardware requires workarounds to support PG */ adev->pg_flags = 0; if (adev->rev_id != 0x00) { - adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_PIPELINE | AMD_PG_SUPPORT_CP | -- cgit v1.2.3 From 607523d19c9d67ba4cf7bdaced644f11ed04992c Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 10 Mar 2017 12:13:04 +1000 Subject: drm/amdgpu: fix parser init error path to avoid crash in parser fini MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we don't reset the chunk info in the error path, the subsequent fini path will double free. Reviewed-by: Christian König Signed-off-by: Dave Airlie Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index d2d0f60ff36d..99424cb8020b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -240,6 +240,8 @@ free_partial_kdata: for (; i >= 0; i--) drm_free_large(p->chunks[i].kdata); kfree(p->chunks); + p->chunks = NULL; + p->nchunks = 0; put_ctx: amdgpu_ctx_put(p->ctx); free_chunk: -- cgit v1.2.3 From 3fb632e40d7667d8bedfabc28850ac06d5493f54 Mon Sep 17 00:00:00 2001 From: Jason Yan Date: Fri, 10 Mar 2017 11:27:23 +0800 Subject: md: fix super_offset endianness in super_1_rdev_size_change The sb->super_offset is stored little-endian on disk, but the rdev->sb_start is in host byte order, so fix this by adding cpu_to_le64. Signed-off-by: Jason Yan Signed-off-by: Shaohua Li --- drivers/md/md.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index cd89ad3c3a0d..6e76d97a8fc3 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1879,7 +1879,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) } sb = page_address(rdev->sb_page); sb->data_size = cpu_to_le64(num_sectors); - sb->super_offset = rdev->sb_start; + sb->super_offset = cpu_to_le64(rdev->sb_start); sb->sb_csum = calc_sb_1_csum(sb); do { md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, -- cgit v1.2.3 From 1345921393ba23b60d3fcf15933e699232ad25ae Mon Sep 17 00:00:00 2001 From: Jason Yan Date: Fri, 10 Mar 2017 11:49:12 +0800 Subject: md: fix incorrect use of lexx_to_cpu in does_sb_need_changing The sb->layout is of type __le32, so we should use le32_to_cpu.
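Both md fixes above follow the same rule for on-disk metadata: declare each field with a fixed-endian type and convert on every access with the helper that matches the field's width. A small illustrative example follows; the ondisk_super layout is invented, while the __leXX types and conversion helpers are the real kernel ones.

#include <linux/types.h>
#include <asm/byteorder.h>

/* invented on-disk layout, fixed little-endian like the md v1 superblock */
struct ondisk_super {
        __le64 super_offset;
        __le32 layout;
};

/* host -> disk: use the helper matching the field width */
static void super_set_offset(struct ondisk_super *sb, u64 offset)
{
        sb->super_offset = cpu_to_le64(offset);
}

/*
 * disk -> host: le32_to_cpu() for a __le32 field; le64_to_cpu() here
 * would be the width mismatch the second patch corrects.
 */
static bool super_layout_changed(const struct ondisk_super *sb, u32 layout)
{
        return layout != le32_to_cpu(sb->layout);
}

Keeping the __leXX annotations also lets sparse flag a bare assignment like the one removed in super_1_rdev_size_change().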
Signed-off-by: Jason Yan Signed-off-by: Shaohua Li --- drivers/md/md.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 6e76d97a8fc3..f6ae1d67bcd0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2287,7 +2287,7 @@ static bool does_sb_need_changing(struct mddev *mddev) /* Check if any mddev parameters have changed */ if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || - (mddev->layout != le64_to_cpu(sb->layout)) || + (mddev->layout != le32_to_cpu(sb->layout)) || (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) return true; -- cgit v1.2.3 From a4c2a13129f7c5bcf81704c06851601593303fd5 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Tue, 28 Feb 2017 17:14:41 -0800 Subject: Input: i8042 - add TUXEDO BU1406 (N24_25BU) to the nomux list TUXEDO BU1406 does not implement active multiplexing mode properly, and takes around 550 ms in i8042_set_mux_mode(). Given that the device does not have external AUX port, there is no downside in disabling the MUX mode. Reported-by: Paul Menzel Suggested-by: Vojtech Pavlik Reviewed-by: Marcos Paulo de Souza Signed-off-by: Dmitry Torokhov --- drivers/input/serio/i8042-x86ia64io.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index e856b073d533..312bd6ca9198 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -520,6 +520,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), }, }, + { + /* TUXEDO BU1406 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"), + }, + }, { } }; -- cgit v1.2.3 From 92ef6f97a66e580189a41a132d0f8a9f78d6ddce Mon Sep 17 00:00:00 2001 From: Matjaz Hegedic Date: Fri, 10 Mar 2017 14:33:09 -0800 Subject: Input: elan_i2c - add ASUS EeeBook X205TA special touchpad fw EeeBook X205TA is yet another ASUS device with a special touchpad firmware that needs to be accounted for during initialization, or else the touchpad will go into an invalid state upon suspend/resume. Adding the appropriate ic_type and product_id check fixes the problem. Signed-off-by: Matjaz Hegedic Acked-by: KT Liao Cc: stable@vger.kernel.org Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/elan_i2c_core.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 352050e9031d..d5ab9ddef3e3 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -218,17 +218,19 @@ static int elan_query_product(struct elan_tp_data *data) static int elan_check_ASUS_special_fw(struct elan_tp_data *data) { - if (data->ic_type != 0x0E) - return false; - - switch (data->product_id) { - case 0x05 ... 0x07: - case 0x09: - case 0x13: + if (data->ic_type == 0x0E) { + switch (data->product_id) { + case 0x05 ... 
0x07: + case 0x09: + case 0x13: + return true; + } + } else if (data->ic_type == 0x08 && data->product_id == 0x26) { + /* ASUS EeeBook X205TA */ return true; - default: - return false; } + + return false; } static int __elan_initialize(struct elan_tp_data *data) -- cgit v1.2.3 From 0134ed4fb9e78672ee9f7b18007114404c81e63f Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Fri, 10 Mar 2017 13:24:22 -0700 Subject: device-dax: fix pmd/pte fault fallback handling Jeff Moyer reports: With a device dax alignment of 4KB or 2MB, I get sigbus when running the attached fio job file for the current kernel (4.11.0-rc1+). If I specify an alignment of 1GB, it works. I turned on debug output, and saw that it was failing in the huge fault code. dax dax1.0: dax_open dax dax1.0: dax_mmap dax dax1.0: dax_dev_huge_fault: fio: write (0x7f08f0a00000 - dax dax1.0: __dax_dev_pud_fault: phys_to_pgoff(0xffffffffcf60 dax dax1.0: dax_release fio config for reproduce: [global] ioengine=dev-dax direct=0 filename=/dev/dax0.0 bs=2m [write] rw=write [read] stonewall rw=read The driver fails to fallback when taking a fault that is larger than the device alignment, or handling a larger fault when a smaller mapping is already established. While we could support larger mappings for a device with a smaller alignment, that change is too large for the immediate fix. The simplest change is to force fallback until the fault size matches the alignment. Fixes: dee410792419 ("/dev/dax, core: file operations and dax-mmap") Cc: Reported-by: Jeff Moyer Signed-off-by: Dave Jiang Signed-off-by: Dan Williams --- drivers/dax/dax.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers') diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c index 8d9829ff2a78..a284dc532e46 100644 --- a/drivers/dax/dax.c +++ b/drivers/dax/dax.c @@ -427,6 +427,7 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) int rc = VM_FAULT_SIGBUS; phys_addr_t phys; pfn_t pfn; + unsigned int fault_size = PAGE_SIZE; if (check_vma(dax_dev, vmf->vma, __func__)) return VM_FAULT_SIGBUS; @@ -437,6 +438,9 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } + if (fault_size != dax_region->align) + return VM_FAULT_SIGBUS; + phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE); if (phys == -1) { dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, @@ -464,6 +468,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) phys_addr_t phys; pgoff_t pgoff; pfn_t pfn; + unsigned int fault_size = PMD_SIZE; if (check_vma(dax_dev, vmf->vma, __func__)) return VM_FAULT_SIGBUS; @@ -480,6 +485,16 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } + if (fault_size < dax_region->align) + return VM_FAULT_SIGBUS; + else if (fault_size > dax_region->align) + return VM_FAULT_FALLBACK; + + /* if we are outside of the VMA */ + if (pmd_addr < vmf->vma->vm_start || + (pmd_addr + PMD_SIZE) > vmf->vma->vm_end) + return VM_FAULT_SIGBUS; + pgoff = linear_page_index(vmf->vma, pmd_addr); phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE); if (phys == -1) { -- cgit v1.2.3 From 70b085b06c4560a69e95607f77bb4c2b2e41943c Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Fri, 10 Mar 2017 13:24:27 -0700 Subject: device-dax: fix pud fault fallback handling Jeff Moyer reports: With a device dax alignment of 4KB or 2MB, I get sigbus when running the attached fio job file for the current kernel (4.11.0-rc1+). 
If I specify an alignment of 1GB, it works. I turned on debug output, and saw that it was failing in the huge fault code. dax dax1.0: dax_open dax dax1.0: dax_mmap dax dax1.0: dax_dev_huge_fault: fio: write (0x7f08f0a00000 - dax dax1.0: __dax_dev_pud_fault: phys_to_pgoff(0xffffffffcf60) dax dax1.0: dax_release fio config for reproduce: [global] ioengine=dev-dax direct=0 filename=/dev/dax0.0 bs=2m [write] rw=write [read] stonewall rw=read The driver fails to fallback when taking a fault that is larger than the device alignment, or handling a larger fault when a smaller mapping is already established. While we could support larger mappings for a device with a smaller alignment, that change is too large for the immediate fix. The simplest change is to force fallback until the fault size matches the alignment. Reported-by: Jeff Moyer Signed-off-by: Dave Jiang Signed-off-by: Dan Williams --- drivers/dax/dax.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers') diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c index a284dc532e46..523fecec7bda 100644 --- a/drivers/dax/dax.c +++ b/drivers/dax/dax.c @@ -518,6 +518,8 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) phys_addr_t phys; pgoff_t pgoff; pfn_t pfn; + unsigned int fault_size = PUD_SIZE; + if (check_vma(dax_dev, vmf->vma, __func__)) return VM_FAULT_SIGBUS; @@ -534,6 +536,16 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } + if (fault_size < dax_region->align) + return VM_FAULT_SIGBUS; + else if (fault_size > dax_region->align) + return VM_FAULT_FALLBACK; + + /* if we are outside of the VMA */ + if (pud_addr < vmf->vma->vm_start || + (pud_addr + PUD_SIZE) > vmf->vma->vm_end) + return VM_FAULT_SIGBUS; + pgoff = linear_page_index(vmf->vma, pud_addr); phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE); if (phys == -1) { -- cgit v1.2.3 From 52084f89b38cdd896b59627c629915ef1a7bf615 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Thu, 9 Mar 2017 16:56:01 -0700 Subject: device-dax: fix debug output typo The debug output for return the return data of pgoff_to_phys() in the fault handlers has 'phys' and 'pgoff' incorrectly swapped. 
Reported-by: Jeff Moyer Signed-off-by: Dave Jiang Signed-off-by: Dan Williams --- drivers/dax/dax.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c index 523fecec7bda..80c6db279ae1 100644 --- a/drivers/dax/dax.c +++ b/drivers/dax/dax.c @@ -443,7 +443,7 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE); if (phys == -1) { - dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, + dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, vmf->pgoff); return VM_FAULT_SIGBUS; } @@ -498,7 +498,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) pgoff = linear_page_index(vmf->vma, pmd_addr); phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE); if (phys == -1) { - dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, + dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, pgoff); return VM_FAULT_SIGBUS; } @@ -549,7 +549,7 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) pgoff = linear_page_index(vmf->vma, pud_addr); phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE); if (phys == -1) { - dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, + dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, pgoff); return VM_FAULT_SIGBUS; } -- cgit v1.2.3 From c962cff17dfa11f4a8227ac16de2b28aea3312e4 Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Fri, 3 Mar 2017 16:02:23 +0800 Subject: Revert "x86/acpi: Set persistent cpuid <-> nodeid mapping when booting" Revert: dc6db24d2476 ("x86/acpi: Set persistent cpuid <-> nodeid mapping when booting") The mapping of "cpuid <-> nodeid" is established at boot time via ACPI tables to keep associations of workqueues and other node related items consistent across cpu hotplug. But, ACPI tables are unreliable and failures with that boot time mapping have been reported on machines where the ACPI table and the physical information which is retrieved at actual hotplug is inconsistent. Revert the mapping implementation so it can be replaced with a less error prone approach. 
Signed-off-by: Dou Liyang Tested-by: Xiaolong Ye Cc: rjw@rjwysocki.net Cc: linux-acpi@vger.kernel.org Cc: guzheng1@huawei.com Cc: izumi.taku@jp.fujitsu.com Cc: lenb@kernel.org Link: http://lkml.kernel.org/r/1488528147-2279-2-git-send-email-douly.fnst@cn.fujitsu.com Signed-off-by: Thomas Gleixner --- arch/x86/kernel/acpi/boot.c | 2 +- drivers/acpi/acpi_processor.c | 5 --- drivers/acpi/bus.c | 1 - drivers/acpi/processor_core.c | 73 ------------------------------------------- include/linux/acpi.h | 3 -- 5 files changed, 1 insertion(+), 83 deletions(-) (limited to 'drivers') diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index ae32838cac5f..f6b0e87d2388 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -710,7 +710,7 @@ static void __init acpi_set_irq_model_ioapic(void) #ifdef CONFIG_ACPI_HOTPLUG_CPU #include -int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) +static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) { #ifdef CONFIG_ACPI_NUMA int nid; diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 4467a8089ab8..5d208a99d0c9 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu) void __weak arch_unregister_cpu(int cpu) {} -int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) -{ - return -ENODEV; -} - static int acpi_processor_hotadd_init(struct acpi_processor *pr) { unsigned long long sta; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 80cb5eb75b63..34fbe027e73a 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1249,7 +1249,6 @@ static int __init acpi_init(void) acpi_wakeup_device_init(); acpi_debugger_init(); acpi_setup_sb_notify_handler(); - acpi_set_processor_mapping(); return 0; } diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 611a5585a902..a84386204659 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -278,79 +278,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) } EXPORT_SYMBOL_GPL(acpi_get_cpuid); -#ifdef CONFIG_ACPI_HOTPLUG_CPU -static bool __init -map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid) -{ - int type, id; - u32 acpi_id; - acpi_status status; - acpi_object_type acpi_type; - unsigned long long tmp; - union acpi_object object = { 0 }; - struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; - - status = acpi_get_type(handle, &acpi_type); - if (ACPI_FAILURE(status)) - return false; - - switch (acpi_type) { - case ACPI_TYPE_PROCESSOR: - status = acpi_evaluate_object(handle, NULL, NULL, &buffer); - if (ACPI_FAILURE(status)) - return false; - acpi_id = object.processor.proc_id; - - /* validate the acpi_id */ - if(acpi_processor_validate_proc_id(acpi_id)) - return false; - break; - case ACPI_TYPE_DEVICE: - status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp); - if (ACPI_FAILURE(status)) - return false; - acpi_id = tmp; - break; - default: - return false; - } - - type = (acpi_type == ACPI_TYPE_DEVICE) ? 
1 : 0; - - *phys_id = __acpi_get_phys_id(handle, type, acpi_id, false); - id = acpi_map_cpuid(*phys_id, acpi_id); - - if (id < 0) - return false; - *cpuid = id; - return true; -} - -static acpi_status __init -set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context, - void **rv) -{ - phys_cpuid_t phys_id; - int cpu_id; - - if (!map_processor(handle, &phys_id, &cpu_id)) - return AE_ERROR; - - acpi_map_cpu2node(handle, cpu_id, phys_id); - return AE_OK; -} - -void __init acpi_set_processor_mapping(void) -{ - /* Set persistent cpu <-> node mapping for all processors. */ - acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, set_processor_node_mapping, - NULL, NULL, NULL); -} -#else -void __init acpi_set_processor_mapping(void) {} -#endif /* CONFIG_ACPI_HOTPLUG_CPU */ - #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base, u64 *phys_addr, int *ioapic_id) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 673acda012af..63a7519b00cc 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -294,11 +294,8 @@ bool acpi_processor_validate_proc_id(int proc_id); int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu); int acpi_unmap_cpu(int cpu); -int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ -void acpi_set_processor_mapping(void); - #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr); #endif -- cgit v1.2.3 From 09c3f2bd5c7e5f18687663acb6adc6b167484ca5 Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Fri, 3 Mar 2017 16:02:24 +0800 Subject: Revert"x86/acpi: Enable MADT APIs to return disabled apicids" Revert: 8ad893faf2ea ("x86/acpi: Enable MADT APIs to return disabled apicids") Remove the leftovers of the boot time 'cpuid <-> nodeid' mapping approach. 
Signed-off-by: Dou Liyang Tested-by: Xiaolong Ye Cc: rjw@rjwysocki.net Cc: linux-acpi@vger.kernel.org Cc: guzheng1@huawei.com Cc: izumi.taku@jp.fujitsu.com Cc: lenb@kernel.org Link: http://lkml.kernel.org/r/1488528147-2279-3-git-send-email-douly.fnst@cn.fujitsu.com Signed-off-by: Thomas Gleixner --- drivers/acpi/processor_core.c | 60 ++++++++++++++++--------------------------- 1 file changed, 22 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index a84386204659..b933061b6b60 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -32,12 +32,12 @@ static struct acpi_table_madt *get_madt_table(void) } static int map_lapic_id(struct acpi_subtable_header *entry, - u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled) + u32 acpi_id, phys_cpuid_t *apic_id) { struct acpi_madt_local_apic *lapic = container_of(entry, struct acpi_madt_local_apic, header); - if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED)) + if (!(lapic->lapic_flags & ACPI_MADT_ENABLED)) return -ENODEV; if (lapic->processor_id != acpi_id) @@ -48,13 +48,12 @@ static int map_lapic_id(struct acpi_subtable_header *entry, } static int map_x2apic_id(struct acpi_subtable_header *entry, - int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id, - bool ignore_disabled) + int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id) { struct acpi_madt_local_x2apic *apic = container_of(entry, struct acpi_madt_local_x2apic, header); - if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED)) + if (!(apic->lapic_flags & ACPI_MADT_ENABLED)) return -ENODEV; if (device_declaration && (apic->uid == acpi_id)) { @@ -66,13 +65,12 @@ static int map_x2apic_id(struct acpi_subtable_header *entry, } static int map_lsapic_id(struct acpi_subtable_header *entry, - int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id, - bool ignore_disabled) + int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id) { struct acpi_madt_local_sapic *lsapic = container_of(entry, struct acpi_madt_local_sapic, header); - if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED)) + if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED)) return -ENODEV; if (device_declaration) { @@ -89,13 +87,12 @@ static int map_lsapic_id(struct acpi_subtable_header *entry, * Retrieve the ARM CPU physical identifier (MPIDR) */ static int map_gicc_mpidr(struct acpi_subtable_header *entry, - int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr, - bool ignore_disabled) + int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr) { struct acpi_madt_generic_interrupt *gicc = container_of(entry, struct acpi_madt_generic_interrupt, header); - if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED)) + if (!(gicc->flags & ACPI_MADT_ENABLED)) return -ENODEV; /* device_declaration means Device object in DSDT, in the @@ -112,7 +109,7 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry, } static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt, - int type, u32 acpi_id, bool ignore_disabled) + int type, u32 acpi_id) { unsigned long madt_end, entry; phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */ @@ -130,20 +127,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt, struct acpi_subtable_header *header = (struct acpi_subtable_header *)entry; if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) { - if (!map_lapic_id(header, acpi_id, &phys_id, - ignore_disabled)) + if (!map_lapic_id(header, acpi_id, &phys_id)) break; } else 
if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) { - if (!map_x2apic_id(header, type, acpi_id, &phys_id, - ignore_disabled)) + if (!map_x2apic_id(header, type, acpi_id, &phys_id)) break; } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { - if (!map_lsapic_id(header, type, acpi_id, &phys_id, - ignore_disabled)) + if (!map_lsapic_id(header, type, acpi_id, &phys_id)) break; } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) { - if (!map_gicc_mpidr(header, type, acpi_id, &phys_id, - ignore_disabled)) + if (!map_gicc_mpidr(header, type, acpi_id, &phys_id)) break; } entry += header->length; @@ -161,15 +154,14 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id) if (!madt) return PHYS_CPUID_INVALID; - rv = map_madt_entry(madt, 1, acpi_id, true); + rv = map_madt_entry(madt, 1, acpi_id); acpi_put_table((struct acpi_table_header *)madt); return rv; } -static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id, - bool ignore_disabled) +static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; @@ -190,38 +182,30 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id, header = (struct acpi_subtable_header *)obj->buffer.pointer; if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) - map_lapic_id(header, acpi_id, &phys_id, ignore_disabled); + map_lapic_id(header, acpi_id, &phys_id); else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) - map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled); + map_lsapic_id(header, type, acpi_id, &phys_id); else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) - map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled); + map_x2apic_id(header, type, acpi_id, &phys_id); else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) - map_gicc_mpidr(header, type, acpi_id, &phys_id, - ignore_disabled); + map_gicc_mpidr(header, type, acpi_id, &phys_id); exit: kfree(buffer.pointer); return phys_id; } -static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type, - u32 acpi_id, bool ignore_disabled) +phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) { phys_cpuid_t phys_id; - phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled); + phys_id = map_mat_entry(handle, type, acpi_id); if (invalid_phys_cpuid(phys_id)) - phys_id = map_madt_entry(get_madt_table(), type, acpi_id, - ignore_disabled); + phys_id = map_madt_entry(get_madt_table(), type, acpi_id); return phys_id; } -phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) -{ - return __acpi_get_phys_id(handle, type, acpi_id, true); -} - int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id) { #ifdef CONFIG_SMP -- cgit v1.2.3 From 8c8cb30f49b86333d8e036e1945cf1a78c03577e Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Fri, 3 Mar 2017 16:02:26 +0800 Subject: acpi/processor: Implement DEVICE operator for processor enumeration ACPI allows to declare processors either with the PROCESSOR or with the DEVICE operator. The current implementation handles only the PROCESSOR operator. On a system which uses the DEVICE operator for processor enumeration the evaluation fails. Check for the ACPI type of the ACPI handle and evaluate PROCESSOR and DEVICE types separately. 
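For reference, a condensed sketch of the evaluation pattern described above; this is a simplified, hypothetical helper distilled from the patch below (the function name, the reduced error handling and the assumption of <linux/acpi.h> are illustrative, not the driver's exact code):

/* Hypothetical helper: fetch the processor id whether the firmware used
 * the Processor() or the Device() operator. */
static acpi_status get_processor_uid(acpi_handle handle,
				     unsigned long long *uid)
{
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	acpi_object_type type;
	acpi_status status;

	status = acpi_get_type(handle, &type);
	if (ACPI_FAILURE(status))
		return status;

	switch (type) {
	case ACPI_TYPE_PROCESSOR:
		/* Processor() operator: the id is part of the evaluated object */
		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
		if (ACPI_SUCCESS(status))
			*uid = object.processor.proc_id;
		return status;
	case ACPI_TYPE_DEVICE:
		/* Device() operator: the id comes from the _UID method */
		return acpi_evaluate_integer(handle, "_UID", NULL, uid);
	default:
		return AE_TYPE;
	}
}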
Signed-off-by: Dou Liyang Tested-by: Xiaolong Ye Cc: rjw@rjwysocki.net Cc: linux-acpi@vger.kernel.org Cc: guzheng1@huawei.com Cc: izumi.taku@jp.fujitsu.com Cc: lenb@kernel.org Link: http://lkml.kernel.org/r/1488528147-2279-5-git-send-email-douly.fnst@cn.fujitsu.com Signed-off-by: Thomas Gleixner --- drivers/acpi/acpi_processor.c | 39 ++++++++++++++++++++++++++++++++------- 1 file changed, 32 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 5d208a99d0c9..9a98d7e00200 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -633,25 +633,50 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle, void **rv) { acpi_status status; + acpi_object_type acpi_type; + unsigned long long uid; union acpi_object object = { 0 }; struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; - status = acpi_evaluate_object(handle, NULL, NULL, &buffer); + status = acpi_get_type(handle, &acpi_type); if (ACPI_FAILURE(status)) - acpi_handle_info(handle, "Not get the processor object\n"); - else - processor_validated_ids_update(object.processor.proc_id); + return false; + + switch (acpi_type) { + case ACPI_TYPE_PROCESSOR: + status = acpi_evaluate_object(handle, NULL, NULL, &buffer); + if (ACPI_FAILURE(status)) + goto err; + uid = object.processor.proc_id; + break; + + case ACPI_TYPE_DEVICE: + status = acpi_evaluate_integer(handle, "_UID", NULL, &uid); + if (ACPI_FAILURE(status)) + goto err; + break; + default: + goto err; + } + + processor_validated_ids_update(uid); + return true; + +err: + acpi_handle_info(handle, "Invalid processor object\n"); + return false; - return AE_OK; } -static void __init acpi_processor_check_duplicates(void) +void __init acpi_processor_check_duplicates(void) { - /* Search all processor nodes in ACPI namespace */ + /* check the correctness for all processors in ACPI namespace */ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, acpi_processor_ids_walk, NULL, NULL, NULL); + acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk, + NULL, NULL); } bool __init acpi_processor_validate_proc_id(int proc_id) -- cgit v1.2.3 From a77d6cd968497792e072b74dff45b891ba778ddb Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Fri, 3 Mar 2017 16:02:27 +0800 Subject: acpi/processor: Check for duplicate processor ids at hotplug time The check for duplicate processor ids happens at boot time based on the ACPI table contents, but the final sanity checks for a processor happen at hotplug time. At hotplug time, where the physical information is available, which might differ from the ACPI table information, a check for duplicate processor ids is missing. Add it to the hotplug checks and rename the function so it better reflects its purpose. 
Signed-off-by: Dou Liyang Tested-by: Xiaolong Ye Cc: rjw@rjwysocki.net Cc: linux-acpi@vger.kernel.org Cc: guzheng1@huawei.com Cc: izumi.taku@jp.fujitsu.com Cc: lenb@kernel.org Link: http://lkml.kernel.org/r/1488528147-2279-6-git-send-email-douly.fnst@cn.fujitsu.com Signed-off-by: Thomas Gleixner --- drivers/acpi/acpi_processor.c | 13 ++++++++++--- include/linux/acpi.h | 2 +- 2 files changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 9a98d7e00200..0143135b3abe 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -280,6 +280,13 @@ static int acpi_processor_get_info(struct acpi_device *device) pr->acpi_id = value; } + if (acpi_duplicate_processor_id(pr->acpi_id)) { + dev_err(&device->dev, + "Failed to get unique processor _UID (0x%x)\n", + pr->acpi_id); + return -ENODEV; + } + pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id); if (invalid_phys_cpuid(pr->phys_id)) @@ -580,7 +587,7 @@ static struct acpi_scan_handler processor_container_handler = { static int nr_unique_ids __initdata; /* The number of the duplicate processor IDs */ -static int nr_duplicate_ids __initdata; +static int nr_duplicate_ids; /* Used to store the unique processor IDs */ static int unique_processor_ids[] __initdata = { @@ -588,7 +595,7 @@ static int unique_processor_ids[] __initdata = { }; /* Used to store the duplicate processor IDs */ -static int duplicate_processor_ids[] __initdata = { +static int duplicate_processor_ids[] = { [0 ... NR_CPUS - 1] = -1, }; @@ -679,7 +686,7 @@ void __init acpi_processor_check_duplicates(void) NULL, NULL); } -bool __init acpi_processor_validate_proc_id(int proc_id) +bool acpi_duplicate_processor_id(int proc_id) { int i; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 63a7519b00cc..9b05886f9773 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -287,7 +287,7 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) } /* Validate the processor object's proc_id */ -bool acpi_processor_validate_proc_id(int proc_id); +bool acpi_duplicate_processor_id(int proc_id); #ifdef CONFIG_ACPI_HOTPLUG_CPU /* Arch dependent functions for cpu hotplug support */ -- cgit v1.2.3 From 33d8c15559df4f0bce25d7e16ebb5879e249f2e7 Mon Sep 17 00:00:00 2001 From: Romain Izard Date: Thu, 9 Mar 2017 17:58:38 +0100 Subject: Revert "clocksource/drivers/tcb_clksrc: Use 32 bit tcb as sched_clock" This reverts commit 7b9f1d16e6d1 ("clocksource/drivers/tcb_clksrc: Use 32 bit tcb as sched_clock"). In the current state, the kernel warns against a late registration of the new sched_clock, the printk clock resets after only a few minutes, and it seems that scheduling can be affected as well. 
Signed-off-by: Romain Izard Signed-off-by: Daniel Lezcano --- drivers/clocksource/tcb_clksrc.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 745844ee973e..d4ca9962a759 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -10,7 +10,6 @@ #include #include #include -#include /* @@ -57,14 +56,9 @@ static u64 tc_get_cycles(struct clocksource *cs) return (upper << 16) | lower; } -static u32 tc_get_cv32(void) -{ - return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV)); -} - static u64 tc_get_cycles32(struct clocksource *cs) { - return tc_get_cv32(); + return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV)); } static struct clocksource clksrc = { @@ -75,11 +69,6 @@ static struct clocksource clksrc = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static u64 notrace tc_read_sched_clock(void) -{ - return tc_get_cv32(); -} - #ifdef CONFIG_GENERIC_CLOCKEVENTS struct tc_clkevt_device { @@ -350,9 +339,6 @@ static int __init tcb_clksrc_init(void) clksrc.read = tc_get_cycles32; /* setup ony channel 0 */ tcb_setup_single_chan(tc, best_divisor_idx); - - /* register sched_clock on chips with single 32 bit counter */ - sched_clock_register(tc_read_sched_clock, 32, divided_rate); } else { /* tclib will give us three clocks no matter what the * underlying platform supports. -- cgit v1.2.3 From bafa687dccbe16cbf9b1824409836d79d6dc6543 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 23 Feb 2017 12:31:54 +0200 Subject: extcon: int3496: Propagate error code of gpiod_to_irq() gpiod_to_irq() doesn't return 0. Thus, we just adjust condition and replace -EINVAL by actual error code it returns. Signed-off-by: Andy Shevchenko Signed-off-by: Chanwoo Choi --- drivers/extcon/extcon-intel-int3496.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c index a3131b036de6..38eb6cab938f 100644 --- a/drivers/extcon/extcon-intel-int3496.c +++ b/drivers/extcon/extcon-intel-int3496.c @@ -100,9 +100,9 @@ static int int3496_probe(struct platform_device *pdev) } data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id); - if (data->usb_id_irq <= 0) { + if (data->usb_id_irq < 0) { dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq); - return -EINVAL; + return data->usb_id_irq; } data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en", -- cgit v1.2.3 From 9fa1d7537242bd580ffa99c4725a0407096aad26 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Tue, 28 Feb 2017 10:11:45 +0200 Subject: drm/omap: fix dmabuf mmap for dma_alloc'ed buffers omap_gem_dmabuf_mmap() returns an error (with a WARN) when called for a buffer which is allocated with dma_alloc_*(). This prevents dmabuf mmap from working on SoCs without DMM, e.g. AM4 and OMAP3. I could not find any reason for omap_gem_dmabuf_mmap() rejecting such buffers, and just removing the if() fixes the limitation. 
Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index af267c35d813..ee5883f59be5 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer, struct drm_gem_object *obj = buffer->priv; int ret = 0; - if (WARN_ON(!obj->filp)) - return -EINVAL; - ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma); if (ret < 0) return ret; -- cgit v1.2.3 From aac66bf5f916f645bd57029490a72c3f91f2c274 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 6 Mar 2017 23:54:01 +0000 Subject: drm/i915: use correct node for handling cache domain eviction It looks like we were incorrectly comparing vma->node against itself instead of the target node, when evicting for a node on systems where we need guard pages between regions with different cache domains. As a consequence we can end up trying to needlessly evict neighbouring nodes, even if they have the same cache domain, and if they were pinned we would fail the eviction. Fixes: 625d988acc28 ("drm/i915: Extract reserving space in the GTT to a helper") Signed-off-by: Matthew Auld Cc: Chris Wilson Cc: Joonas Lahtinen Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170306235414.23407-3-matthew.auld@intel.com Signed-off-by: Chris Wilson (cherry picked from commit fe65cbdbc97929e4a522716ed279a36783656142) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_gem_evict.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index c181b1bb3d2c..3be2503aa042 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, * those as well to make room for our guard pages. */ if (check_color) { - if (vma->node.start + vma->node.size == node->start) { - if (vma->node.color == node->color) + if (node->start + node->size == target->start) { + if (node->color == target->color) continue; } - if (vma->node.start == node->start + node->size) { - if (vma->node.color == node->color) + if (node->start == target->start + target->size) { + if (node->color == target->color) continue; } } -- cgit v1.2.3 From 3a0d137de035cc8c70194d9988ded61825b5ff8a Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 8 Mar 2017 13:00:07 +0100 Subject: drm/i915: Nuke skl_update_plane debug message from the pipe update critical section MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit printks are slow so we should not be doing them from the vblank evade critical section. These could explain why we sometimes seem to blow past our 100 usec deadline. The problem has been there ever since commit c331879ce8ea ("drm/i915: skylake sprite plane scaling using shared scalers.") but it may not have been readily visible until commit e1edbd44e23b ("drm/i915: Complain if we take too long under vblank evasion.") increased our chances of noticing it. 
Signed-off-by: Maarten Lankhorst Cc: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1488974407-25175-1-git-send-email-maarten.lankhorst@linux.intel.com Fixes: c331879ce8ea ("drm/i915: skylake sprite plane scaling using shared scalers") Cc: # v4.2+ Reviewed-by: Ville Syrjälä [mlankhorst: Add missing tags, point to the correct offending commit] (cherry picked from commit d38146b9ee16264ff9a88bf3391ab9f2f5af3646) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_sprite.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 9ef54688872a..9481ca9a3ae7 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -254,9 +254,6 @@ skl_update_plane(struct drm_plane *drm_plane, int scaler_id = plane_state->scaler_id; const struct intel_scaler *scaler; - DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", - plane_id, PS_PLANE_SEL(plane_id)); - scaler = &crtc_state->scaler_state.scalers[scaler_id]; I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), -- cgit v1.2.3 From 6aef660370a9c246956ba6d01eebd8063c4214cb Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 10 Mar 2017 09:32:49 +0000 Subject: drm/i915: Fix forcewake active domain tracking In commit 003342a50021 ("drm/i915: Keep track of active forcewake domains in a bitmask") I forgot to adjust the newly introduce fw_domains_active state across reset. This caused the assert_forcewakes_inactive to trigger during suspend and resume if there were user held forcewakes. v2: Bitmask checks are required since vfuncs are not always present. v3: Move bitmask tracking to get/put vfunc for simplicity. (Chris Wilson) Signed-off-by: Tvrtko Ursulin Fixes: 003342a50021 ("drm/i915: Keep track of active forcewake domains in a bitmask") Testcase: igt/drv_suspend/forcewake Cc: Tvrtko Ursulin Cc: "Paneri, Praveen" Cc: Chris Wilson Cc: Daniel Vetter Cc: Jani Nikula Cc: intel-gfx@lists.freedesktop.org Cc: v4.10+ Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170310093249.4484-1-tvrtko.ursulin@linux.intel.com (cherry picked from commit b8473050805f35add97f3ff57570d55a01808df5) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_uncore.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index abe08885a5ba..b7ff592b14f5 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma for_each_fw_domain_masked(d, fw_domains, dev_priv) fw_domain_wait_ack(d); + + dev_priv->uncore.fw_domains_active |= fw_domains; } static void @@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma fw_domain_put(d); fw_domain_posting_read(d); } + + dev_priv->uncore.fw_domains_active &= ~fw_domains; } static void @@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) if (WARN_ON(domain->wake_count == 0)) domain->wake_count++; - if (--domain->wake_count == 0) { + if (--domain->wake_count == 0) dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask); - dev_priv->uncore.fw_domains_active &= ~domain->mask; - } spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); @@ -454,10 +456,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, fw_domains &= 
~domain->mask; } - if (fw_domains) { + if (fw_domains) dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); - dev_priv->uncore.fw_domains_active |= fw_domains; - } } /** @@ -968,7 +968,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv, fw_domain_arm_timer(domain); dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); - dev_priv->uncore.fw_domains_active |= fw_domains; } static inline void __force_wake_auto(struct drm_i915_private *dev_priv, -- cgit v1.2.3 From c5f7c5a9a0f84c511a8a336491f9b8a3060b6517 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Mon, 6 Mar 2017 16:21:16 +0200 Subject: drivers, xen: convert grant_map.users from atomic_t to refcount_t refcount_t type and corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This allows to avoid accidental refcounter overflows that might lead to use-after-free situations. Signed-off-by: Elena Reshetova Signed-off-by: Hans Liljestrand Signed-off-by: Kees Cook Signed-off-by: David Windsor Signed-off-by: Boris Ostrovsky --- drivers/xen/gntdev.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index c77a0751a311..f3bf8f4e2d6c 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -86,7 +87,7 @@ struct grant_map { int index; int count; int flags; - atomic_t users; + refcount_t users; struct unmap_notify notify; struct ioctl_gntdev_grant_ref *grants; struct gnttab_map_grant_ref *map_ops; @@ -166,7 +167,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) add->index = 0; add->count = count; - atomic_set(&add->users, 1); + refcount_set(&add->users, 1); return add; @@ -212,7 +213,7 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map) if (!map) return; - if (!atomic_dec_and_test(&map->users)) + if (!refcount_dec_and_test(&map->users)) return; atomic_sub(map->count, &pages_mapped); @@ -400,7 +401,7 @@ static void gntdev_vma_open(struct vm_area_struct *vma) struct grant_map *map = vma->vm_private_data; pr_debug("gntdev_vma_open %p\n", vma); - atomic_inc(&map->users); + refcount_inc(&map->users); } static void gntdev_vma_close(struct vm_area_struct *vma) @@ -1004,7 +1005,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) goto unlock_out; } - atomic_inc(&map->users); + refcount_inc(&map->users); vma->vm_ops = &gntdev_vmops; -- cgit v1.2.3 From 6e7408acd04d06c04981c0c0fb5a2462b16fae4f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sun, 12 Mar 2017 18:12:56 +0100 Subject: cpufreq: intel_pstate: Update pid_params.sample_rate_ns in pid_param_set() Fix the debugfs interface for PID tuning to actually update pid_params.sample_rate_ns on PID parameters updates, as changing pid_params.sample_rate_ms via debugfs has no effect now. Fixes: a4675fbc4a7a (cpufreq: intel_pstate: Replace timers with utilization update callbacks) Signed-off-by: Rafael J. 
Wysocki Acked-by: Viresh Kumar --- drivers/cpufreq/intel_pstate.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 3d37219a0dd7..f9fe910f3b83 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -989,6 +989,7 @@ static void intel_pstate_update_policies(void) static int pid_param_set(void *data, u64 val) { *(u32 *)data = val; + pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC; intel_pstate_reset_all_pid(); return 0; } -- cgit v1.2.3 From 09498087f922bb623b66db9e89c6fd11a3799867 Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Thu, 9 Mar 2017 07:41:16 +0100 Subject: serial: 8250_dw: Honor clk_round_rate errors in dw8250_set_termios clk_round_rate returns a signed long and may possibly return errors in it, for example if there is no possible rate. Till now dw8250_set_termios ignored any error, the signednes and would just use the value as input to clk_set_rate. This of course falls apart if there is an actual error, so check for errors and only try to set a rate if the value is actually valid. This turned up on some Rockchip platforms after commit 6a171b299379 ("serial: 8250_dw: Allow hardware flow control to be used") enabled set_termios callback in all cases, not only ACPI. Fixes: 6a171b299379 ("serial: 8250_dw: Allow hardware flow control to be used") Signed-off-by: Heiko Stuebner Reviewed-by: Andy Shevchenko Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_dw.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 6ee55a2d47bb..223ac234ddb2 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -257,7 +257,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, { unsigned int baud = tty_termios_baud_rate(termios); struct dw8250_data *d = p->private_data; - unsigned int rate; + long rate; int ret; if (IS_ERR(d->clk) || !old) @@ -265,7 +265,10 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, clk_disable_unprepare(d->clk); rate = clk_round_rate(d->clk, baud * 16); - ret = clk_set_rate(d->clk, rate); + if (rate < 0) + ret = rate; + else + ret = clk_set_rate(d->clk, rate); clk_prepare_enable(d->clk); if (!ret) -- cgit v1.2.3 From b15bfbe6427712d1b992bf806a6df9c05002a0a4 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Sat, 4 Mar 2017 13:09:58 +0000 Subject: serial: 8250_dw: Fix breakage when HAVE_CLK=n Commit 6a171b299379 ("serial: 8250_dw: Allow hardware flow control to be used") recently broke the 8250_dw driver on platforms which don't select HAVE_CLK, as dw8250_set_termios() gets confused by the behaviour of the fallback HAVE_CLK=n clock API in linux/clk.h which pretends everything is fine but returns (valid) NULL clocks and 0 HZ clock rates. That 0 rate is written into the uartclk resulting in a crash at boot, e.g. on Cavium Octeon III based UTM-8 we get something like this: 1180000000800.serial: ttyS0 at MMIO 0x1180000000800 (irq = 41, base_baud = 25000000) is a OCTEON ------------[ cut here ]------------ WARNING: CPU: 2 PID: 1 at drivers/tty/serial/serial_core.c:441 uart_get_baud_rate+0xfc/0x1f0 ... Call Trace: ... 
[] uart_get_baud_rate+0xfc/0x1f0 [] serial8250_do_set_termios+0xb0/0x440 [] uart_set_options+0xe8/0x190 [] serial8250_console_setup+0x84/0x158 [] univ8250_console_setup+0x54/0x70 [] register_console+0x1c8/0x418 [] uart_add_one_port+0x434/0x4b0 [] serial8250_register_8250_port+0x2d8/0x440 [] dw8250_probe+0x388/0x5e8 ... The clock API is defined such that NULL is a valid clock handle so it wouldn't be right to check explicitly for NULL. Instead treat a clk_round_rate() return value of 0 as an error which prevents uartclk being overwritten. Fixes: 6a171b299379 ("serial: 8250_dw: Allow hardware flow control to be used") Signed-off-by: James Hogan Cc: Kefeng Wang Cc: David Daney Cc: Russell King Cc: linux-serial@vger.kernel.org Cc: linux-clk@vger.kernel.org Cc: linux-mips@linux-mips.org Cc: bcm-kernel-feedback-list@broadcom.com Reviewed-by: Andy Shevchenko Reviewed-by: Jason Uy Reviewed-by: Heiko Stuebner Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_dw.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 223ac234ddb2..e65808c482f1 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -267,6 +267,8 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, rate = clk_round_rate(d->clk, baud * 16); if (rate < 0) ret = rate; + else if (rate == 0) + ret = -ENOENT; else ret = clk_set_rate(d->clk, rate); clk_prepare_enable(d->clk); -- cgit v1.2.3 From 542ed784671d4678406c77ed6dd01593a0cdbea1 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Tue, 28 Feb 2017 14:30:33 -0600 Subject: tty: acpi/spcr: QDF2400 E44 checks for wrong OEM revision For Qualcomm Technologies QDF2400 SOCs that are affected by erratum E44, the ACPI oem_revision field is actually set to 1, not 0. Fixes: d8a4995bcea1 ("tty: pl011: Work around QDF2400 E44 stuck BUSY bit") Tested-by: Manoj Iyer Signed-off-by: Timur Tabi Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/spcr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index 01c94669a2b0..3afa8c1fa127 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c @@ -30,7 +30,7 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h) return true; if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) && - h->oem_revision == 0) + h->oem_revision == 1) return true; return false; -- cgit v1.2.3 From 3f8ed54aee491bbb83656592c2d0ad7b78d045ca Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 13 Mar 2017 18:30:12 -0700 Subject: cpufreq: intel_pstate: Correct frequency setting in the HWP mode In the functions intel_pstate_hwp_set(), min/max range from HWP capability MSR along with max_perf_pct and min_perf_pct, is used to set the HWP request MSR. In some cases this doesn't result in the correct HWP max/min in HWP request. For example: In the following case: HWP capabilities from MSR 0x771 0x70a1220 Here cpufreq min/max frequencies from above MSR dump are 700MHz and 3.2GHz respectively. This will result in hwp_min = 0x07 hwp_max = 0x20 To limit max frequency to 2GHz: perf_limits->max_perf_pct = 63 (2GHz as a percent of 3.2GHz rounded up) With the current calculation: adj_range = max_perf_pct * range / 100; adj_range = 63 * (32 - 7) / 100 adj_range = 15 max = hw_min + adj_range; max = 7 + 15 = 22 This will result in HWP request of 0x160f, which will result in a frequency cap of 2.2GHz not 2GHz. 
The problem with the above calculation is that hwp_min of 7 is treated as 0% in the range. But max_perf_pct is calculated with respect to minimum as 0 and max as 3.2GHz or hwp_max, so adding hwp_min to it will result in more than the desired. Since the min_perf_pct and max_perf_pct is already a percent of max frequency or hwp_max, this min/max HWP request value can be calculated directly applying these percentage to hwp_max. Signed-off-by: Srinivas Pandruvada Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index f9fe910f3b83..ee12641ee010 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -845,7 +845,7 @@ static struct freq_attr *hwp_cpufreq_attrs[] = { static void intel_pstate_hwp_set(struct cpufreq_policy *policy) { - int min, hw_min, max, hw_max, cpu, range, adj_range; + int min, hw_min, max, hw_max, cpu; struct perf_limits *perf_limits = limits; u64 value, cap; @@ -863,20 +863,17 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy) hw_max = HWP_GUARANTEED_PERF(cap); else hw_max = HWP_HIGHEST_PERF(cap); - range = hw_max - hw_min; max_perf_pct = perf_limits->max_perf_pct; min_perf_pct = perf_limits->min_perf_pct; + min = hw_max * min_perf_pct / 100; rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); - adj_range = min_perf_pct * range / 100; - min = hw_min + adj_range; + value &= ~HWP_MIN_PERF(~0L); value |= HWP_MIN_PERF(min); - adj_range = max_perf_pct * range / 100; - max = hw_min + adj_range; - + max = hw_max * max_perf_pct / 100; value &= ~HWP_MAX_PERF(~0L); value |= HWP_MAX_PERF(max); -- cgit v1.2.3 From 64ff64b90e62c860772fd0be50b7cfcef1d8a9b2 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Mar 2017 03:22:12 -0800 Subject: scsi: megaraid_sas: enable intx only if msix request fails Without this fix, driver will enable INTx Interrupt pin even though MSI-x vectors are enabled. See below lspci output. DisINTx is unset for MSIx setup. lspci -s 85:00.0 -vvv |grep INT |grep Control Control: I/O+ Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR+ FastB2B- DisINTx- After applying this fix, driver will enable INTx Interrupt pin only if Legacy interrupt method is required. See below lspci output. DisINTx is set for MSIx setup. lspci -s 85:00.0 -vvv |grep INT |grep Control Control: I/O+ Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR+ FastB2B- DisINTx+ Signed-off-by: Kashyap Desai Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas_base.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 7ac9a9ee9bd4..016ffcebaf66 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5034,10 +5034,12 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) &instance->irq_context[j]); /* Retry irq register for IO_APIC*/ instance->msix_vectors = 0; - if (is_probe) + if (is_probe) { + pci_free_irq_vectors(instance->pdev); return megasas_setup_irqs_ioapic(instance); - else + } else { return -1; + } } } return 0; @@ -5277,9 +5279,11 @@ static int megasas_init_fw(struct megasas_instance *instance) MPI2_REPLY_POST_HOST_INDEX_OFFSET); } - i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); - if (i < 0) - goto fail_setup_irqs; + if (!instance->msix_vectors) { + i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); + if (i < 0) + goto fail_setup_irqs; + } dev_info(&instance->pdev->dev, "firmware supports msix\t: (%d)", fw_msix_count); -- cgit v1.2.3 From 49524b3c6e12375627ddd870613fcc6b24909898 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Mar 2017 03:22:13 -0800 Subject: scsi: megaraid_sas: add correct return type check for ldio hint logic for raid1 Return value check of atomic_dec_if_positive is required as it returns old value minus one. Without this fix, driver will send small ios to firmware path and that will be a performance issue. Not critical, but good to have r1_ldio_hint as default value in sdev private. Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_base.c | 3 +++ drivers/scsi/megaraid/megaraid_sas_fusion.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 016ffcebaf66..0016f12cc563 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1963,6 +1963,9 @@ scan_target: if (!mr_device_priv_data) return -ENOMEM; sdev->hostdata = mr_device_priv_data; + + atomic_set(&mr_device_priv_data->r1_ldio_hint, + instance->r1_ldio_hint_default); return 0; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 29650ba669da..ebd746e2d97c 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2338,7 +2338,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, fp_possible = false; atomic_dec(&instance->fw_outstanding); } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) || - atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) { + (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) { fp_possible = false; atomic_dec(&instance->fw_outstanding); if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) -- cgit v1.2.3 From 874d025da667d19b728141437ccbefe9dbaf9e7b Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Mar 2017 03:22:14 -0800 Subject: scsi: megaraid_sas: raid6 also require cpuSel check same as raid5 Without this fix, raid6 performance will not be optimal. Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index ebd746e2d97c..f990ab4d45e1 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2159,7 +2159,7 @@ megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context, cpu_sel = MR_RAID_CTX_CPUSEL_1; if (is_stream_detected(rctx_g35) && - (raid->level == 5) && + ((raid->level == 5) || (raid->level == 6)) && (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) && (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS)) cpu_sel = MR_RAID_CTX_CPUSEL_0; -- cgit v1.2.3 From 22487b66594f18f2a7c211f3ab8bb02dec74d37b Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Mar 2017 03:22:15 -0800 Subject: scsi: megaraid_sas: Driver version upgrade Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index e7e5974e1a2c..2b209bbb4c91 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -35,8 +35,8 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "07.701.16.00-rc1" -#define MEGASAS_RELDATE "February 2, 2017" +#define MEGASAS_VERSION "07.701.17.00-rc1" +#define MEGASAS_RELDATE "March 2, 2017" /* * Device IDs -- cgit v1.2.3 From b0addd3fa6bcd119be9428996d5d4522479ab240 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 13 Mar 2017 13:47:48 +0100 Subject: USB: idmouse: fix NULL-deref at probe Make sure to check the number of endpoints to avoid dereferencing a NULL-pointer should a malicious device lack endpoints. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: stable Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/idmouse.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index 8b9fd7534f69..502bfe30a077 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c @@ -347,6 +347,9 @@ static int idmouse_probe(struct usb_interface *interface, if (iface_desc->desc.bInterfaceClass != 0x0A) return -ENODEV; + if (iface_desc->desc.bNumEndpoints < 1) + return -ENODEV; + /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) -- cgit v1.2.3 From 1dc56c52d2484be09c7398a5207d6b11a4256be9 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 13 Mar 2017 13:47:49 +0100 Subject: USB: lvtest: fix NULL-deref at probe Make sure to check the number of endpoints to avoid dereferencing a NULL-pointer should the probed device lack endpoints. Note that this driver does not bind to any devices by default. 
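The NULL-deref fixes in this group all apply the same defensive pattern: validate bNumEndpoints on the interface's current altsetting before any endpoint descriptor is dereferenced. A minimal, hypothetical probe fragment illustrating the idea (the driver name and the required endpoint count are made up for illustration; assumes <linux/usb.h>):

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_interface *alt = intf->cur_altsetting;

	/* A malicious or broken device may expose fewer endpoints than
	 * expected; refuse to bind instead of dereferencing a missing
	 * entry in the endpoint array. */
	if (alt->desc.bNumEndpoints < 1)
		return -ENODEV;

	/* Only now is alt->endpoint[0].desc safe to touch. */
	return 0;
}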
Fixes: ce21bfe603b3 ("USB: Add LVS Test device driver") Cc: stable # 3.17 Cc: Pratyush Anand Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/lvstest.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c index 77176511658f..d3d124753266 100644 --- a/drivers/usb/misc/lvstest.c +++ b/drivers/usb/misc/lvstest.c @@ -366,6 +366,10 @@ static int lvs_rh_probe(struct usb_interface *intf, hdev = interface_to_usbdev(intf); desc = intf->cur_altsetting; + + if (desc->desc.bNumEndpoints < 1) + return -ENODEV; + endpoint = &desc->endpoint[0].desc; /* valid only for SS root hub */ -- cgit v1.2.3 From f259ca3eed6e4b79ac3d5c5c9fb259fb46e86217 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 13 Mar 2017 13:47:50 +0100 Subject: USB: uss720: fix NULL-deref at probe Make sure to check the number of endpoints to avoid dereferencing a NULL-pointer or accessing memory beyond the endpoint array should a malicious device lack the expected endpoints. Note that the endpoint access that causes the NULL-deref is currently only used for debugging purposes during probe so the oops only happens when dynamic debugging is enabled. This means the driver could be rewritten to continue to accept device with only two endpoints, should such devices exist. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: stable Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/uss720.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index e45a3a680db8..07014cad6dbe 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -709,6 +709,11 @@ static int uss720_probe(struct usb_interface *intf, interface = intf->cur_altsetting; + if (interface->desc.bNumEndpoints < 3) { + usb_put_dev(usbdev); + return -ENODEV; + } + /* * Allocate parport interface */ -- cgit v1.2.3 From 03ace948a4eb89d1cf51c06afdfc41ebca5fdb27 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 13 Mar 2017 13:47:51 +0100 Subject: USB: wusbcore: fix NULL-deref at probe Make sure to check the number of endpoints to avoid dereferencing a NULL-pointer or accessing memory beyond the endpoint array should a malicious device lack the expected endpoints. This specifically fixes the NULL-pointer dereference when probing HWA HC devices. Fixes: df3654236e31 ("wusb: add the Wire Adapter (WA) core") Cc: stable # 2.6.28 Cc: Inaky Perez-Gonzalez Cc: David Vrabel Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/wusbcore/wa-hc.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c index 252c7bd9218a..d01496fd27fe 100644 --- a/drivers/usb/wusbcore/wa-hc.c +++ b/drivers/usb/wusbcore/wa-hc.c @@ -39,6 +39,9 @@ int wa_create(struct wahc *wa, struct usb_interface *iface, int result; struct device *dev = &iface->dev; + if (iface->cur_altsetting->desc.bNumEndpoints < 3) + return -ENODEV; + result = wa_rpipes_create(wa); if (result < 0) goto error_rpipes_create; -- cgit v1.2.3 From daf229b15907fbfdb6ee183aac8ca428cb57e361 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 13 Mar 2017 13:47:52 +0100 Subject: uwb: hwa-rc: fix NULL-deref at probe Make sure to check the number of endpoints to avoid dereferencing a NULL-pointer should a malicious device lack endpoints. 
Note that the dereference happens in the start callback which is called during probe. Fixes: de520b8bd552 ("uwb: add HWA radio controller driver") Cc: stable # 2.6.28 Cc: Inaky Perez-Gonzalez Cc: David Vrabel Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/uwb/hwa-rc.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index 0aa6c3c29d17..35a1e777b449 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c @@ -823,6 +823,9 @@ static int hwarc_probe(struct usb_interface *iface, struct hwarc *hwarc; struct device *dev = &iface->dev; + if (iface->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + result = -ENOMEM; uwb_rc = uwb_rc_alloc(); if (uwb_rc == NULL) { -- cgit v1.2.3 From 4ce362711d78a4999011add3115b8f4b0bc25e8c Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 13 Mar 2017 13:47:53 +0100 Subject: uwb: i1480-dfu: fix NULL-deref at probe Make sure to check the number of endpoints to avoid dereferencing a NULL-pointer should a malicious device lack endpoints. Note that the dereference happens in the cmd and wait_init_done callbacks which are called during probe. Fixes: 1ba47da52712 ("uwb: add the i1480 DFU driver") Cc: stable # 2.6.28 Cc: Inaky Perez-Gonzalez Cc: David Vrabel Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/uwb/i1480/dfu/usb.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c index 2bfc846ac071..6345e85822a4 100644 --- a/drivers/uwb/i1480/dfu/usb.c +++ b/drivers/uwb/i1480/dfu/usb.c @@ -362,6 +362,9 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) result); } + if (iface->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + result = -ENOMEM; i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL); if (i1480_usb == NULL) { -- cgit v1.2.3 From 3243367b209faed5c320a4e5f9a565ee2a2ba958 Mon Sep 17 00:00:00 2001 From: Samuel Thibault Date: Mon, 13 Mar 2017 20:50:08 +0100 Subject: usb-core: Add LINEAR_FRAME_INTR_BINTERVAL USB quirk Some USB 2.0 devices erroneously report millisecond values in bInterval. The generic config code manages to catch most of them, but in some cases it's not completely enough. The case at stake here is a USB 2.0 braille device, which wants to announce 10ms and thus sets bInterval to 10, but with the USB 2.0 computation that yields to 64ms. It happens that one can type fast enough to reach this interval and get the device buffers overflown, leading to problematic latencies. The generic config code does not catch this case because the 64ms is considered a sane enough value. This change thus adds a USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL quirk to mark devices which actually report milliseconds in bInterval, and marks Vario Ultra devices as needing it. Signed-off-by: Samuel Thibault Acked-by: Alan Stern Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/config.c | 10 ++++++++++ drivers/usb/core/quirks.c | 8 ++++++++ include/linux/usb/quirks.h | 6 ++++++ 3 files changed, 24 insertions(+) (limited to 'drivers') diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 25dbd8c7aec7..4be52c602e9b 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -280,6 +280,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, /* * Adjust bInterval for quirked devices. + */ + /* + * This quirk fixes bIntervals reported in ms. 
+ */ + if (to_usb_device(ddev)->quirks & + USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) { + n = clamp(fls(d->bInterval) + 3, i, j); + i = j = n; + } + /* * This quirk fixes bIntervals reported in * linear microframes. */ diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 24f9f98968a5..96b21b0dac1e 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -170,6 +170,14 @@ static const struct usb_device_id usb_quirk_list[] = { /* M-Systems Flash Disk Pioneers */ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Baum Vario Ultra */ + { USB_DEVICE(0x0904, 0x6101), .driver_info = + USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, + { USB_DEVICE(0x0904, 0x6102), .driver_info = + USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, + { USB_DEVICE(0x0904, 0x6103), .driver_info = + USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, + /* Keytouch QWERTY Panel keyboard */ { USB_DEVICE(0x0926, 0x3333), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 1d0043dc34e4..de2a722fe3cf 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -50,4 +50,10 @@ /* device can't handle Link Power Management */ #define USB_QUIRK_NO_LPM BIT(10) +/* + * Device reports its bInterval as linear frames instead of the + * USB 2.0 calculation. + */ +#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11) + #endif /* __LINUX_USB_QUIRKS_H */ -- cgit v1.2.3 From 0090114d336a9604aa2d90bc83f20f7cd121b76c Mon Sep 17 00:00:00 2001 From: Bin Liu Date: Fri, 10 Mar 2017 14:43:35 -0600 Subject: usb: musb: cppi41: don't check early-TX-interrupt for Isoch transfer The CPPI 4.1 driver polls register to workaround the premature TX interrupt issue, but it causes audio playback underrun when triggered in Isoch transfers. Isoch doesn't do back-to-back transfers, the TX should be done by the time the next transfer is scheduled. So skip this polling workaround for Isoch transfer. Fixes: a655f481d83d6 ("usb: musb: musb_cppi41: handle pre-mature TX complete interrupt") Cc: #4.1+ Reported-by: Alexandre Bailon Acked-by: Sebastian Andrzej Siewior Tested-by: Alexandre Bailon Signed-off-by: Bin Liu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/musb/musb_cppi41.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index 00e272bfee39..355655f8a3fb 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c @@ -238,8 +238,27 @@ static void cppi41_dma_callback(void *private_data, transferred < cppi41_channel->packet_sz) cppi41_channel->prog_len = 0; - if (cppi41_channel->is_tx) - empty = musb_is_tx_fifo_empty(hw_ep); + if (cppi41_channel->is_tx) { + u8 type; + + if (is_host_active(musb)) + type = hw_ep->out_qh->type; + else + type = hw_ep->ep_in.type; + + if (type == USB_ENDPOINT_XFER_ISOC) + /* + * Don't use the early-TX-interrupt workaround below + * for Isoch transfter. Since Isoch are periodic + * transfer, by the time the next transfer is + * scheduled, the current one should be done already. + * + * This avoids audio playback underrun issue. 
+ */ + empty = true; + else + empty = musb_is_tx_fifo_empty(hw_ep); + } if (!cppi41_channel->is_tx || empty) { cppi41_trans_done(cppi41_channel); -- cgit v1.2.3 From 6b7ad496084e3d8ffa844a02e0f7f76f3eb82203 Mon Sep 17 00:00:00 2001 From: Bin Liu Date: Fri, 10 Mar 2017 14:43:36 -0600 Subject: usb: musb: dsps: fix iounmap in error and exit paths Cleanly iounmap the pointer in error and exit paths. Signed-off-by: Bin Liu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/musb/musb_dsps.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 7c047c4a2565..9c7ee26ef388 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -933,7 +933,7 @@ static int dsps_probe(struct platform_device *pdev) if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) { ret = dsps_setup_optional_vbus_irq(pdev, glue); if (ret) - return ret; + goto err_iounmap; } platform_set_drvdata(pdev, glue); @@ -946,6 +946,8 @@ static int dsps_probe(struct platform_device *pdev) err: pm_runtime_disable(&pdev->dev); +err_iounmap: + iounmap(glue->usbss_base); return ret; } @@ -956,6 +958,7 @@ static int dsps_remove(struct platform_device *pdev) platform_device_unregister(glue->musb); pm_runtime_disable(&pdev->dev); + iounmap(glue->usbss_base); return 0; } -- cgit v1.2.3 From bc1e2154542071e3cfe1734b143af9b8bdacf8bd Mon Sep 17 00:00:00 2001 From: Bin Liu Date: Fri, 10 Mar 2017 14:43:37 -0600 Subject: usb: musb: fix possible spinlock deadlock The DSPS glue calls del_timer_sync() in its musb_platform_disable() implementation, which requires the caller to not hold a lock. But musb_remove() calls musb_platform_disable() will musb->lock held. This could causes spinlock deadlock. So change musb_remove() to call musb_platform_disable() without holds musb->lock. This doesn't impact the musb_platform_disable implementation in other glue drivers. root@am335x-evm:~# modprobe -r musb-dsps [ 126.134879] musb-hdrc musb-hdrc.1: remove, state 1 [ 126.140465] usb usb2: USB disconnect, device number 1 [ 126.146178] usb 2-1: USB disconnect, device number 2 [ 126.416985] musb-hdrc musb-hdrc.1: USB bus 2 deregistered [ 126.423943] [ 126.425525] ====================================================== [ 126.431997] [ INFO: possible circular locking dependency detected ] [ 126.438564] 4.11.0-rc1-00003-g1557f13bca04-dirty #77 Not tainted [ 126.444852] ------------------------------------------------------- [ 126.451414] modprobe/778 is trying to acquire lock: [ 126.456523] (((&glue->timer))){+.-...}, at: [] del_timer_sync+0x0/0xd0 [ 126.464403] [ 126.464403] but task is already holding lock: [ 126.470511] (&(&musb->lock)->rlock){-.-...}, at: [] musb_remove+0x50/0x1 30 [musb_hdrc] [ 126.479965] [ 126.479965] which lock already depends on the new lock. 
[ 126.479965] [ 126.488531] [ 126.488531] the existing dependency chain (in reverse order) is: [ 126.496368] [ 126.496368] -> #1 (&(&musb->lock)->rlock){-.-...}: [ 126.502968] otg_timer+0x80/0xec [musb_dsps] [ 126.507990] call_timer_fn+0xb4/0x390 [ 126.512372] expire_timers+0xf0/0x1fc [ 126.516754] run_timer_softirq+0x80/0x178 [ 126.521511] __do_softirq+0xc4/0x554 [ 126.525802] irq_exit+0xe8/0x158 [ 126.529735] __handle_domain_irq+0x58/0xb8 [ 126.534583] __irq_usr+0x54/0x80 [ 126.538507] [ 126.538507] -> #0 (((&glue->timer))){+.-...}: [ 126.544636] del_timer_sync+0x40/0xd0 [ 126.549066] musb_remove+0x6c/0x130 [musb_hdrc] [ 126.554370] platform_drv_remove+0x24/0x3c [ 126.559206] device_release_driver_internal+0x14c/0x1e0 [ 126.565225] bus_remove_device+0xd8/0x108 [ 126.569970] device_del+0x1e4/0x308 [ 126.574170] platform_device_del+0x24/0x8c [ 126.579006] platform_device_unregister+0xc/0x20 [ 126.584394] dsps_remove+0x14/0x30 [musb_dsps] [ 126.589595] platform_drv_remove+0x24/0x3c [ 126.594432] device_release_driver_internal+0x14c/0x1e0 [ 126.600450] driver_detach+0x38/0x6c [ 126.604740] bus_remove_driver+0x4c/0xa0 [ 126.609407] SyS_delete_module+0x11c/0x1e4 [ 126.614252] __sys_trace_return+0x0/0x10 Fixes: ea2f35c01d5ea ("usb: musb: Fix sleeping function called from invalid context for hdrc glue") Cc: #4.9+ Acked-by: Tony Lindgren Signed-off-by: Bin Liu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/musb/musb_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index d8bae6ca8904..0c3664ab705e 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2490,8 +2490,8 @@ static int musb_remove(struct platform_device *pdev) musb_host_cleanup(musb); musb_gadget_cleanup(musb); - spin_lock_irqsave(&musb->lock, flags); musb_platform_disable(musb); + spin_lock_irqsave(&musb->lock, flags); musb_disable_interrupts(musb); musb_writeb(musb->mregs, MUSB_DEVCTL, 0); spin_unlock_irqrestore(&musb->lock, flags); -- cgit v1.2.3 From 0043c1dfbec7b6e2427409059b05347d6f51aa9f Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Wed, 8 Feb 2017 09:24:25 +0000 Subject: serial: st-asc: Use new GPIOD API to obtain RTS pin The commits mentioned below adapt the GPIO API to allow more information to be passed directly through devm_get_gpiod_from_child() in the first instance. This facilitates the removal of subsequent calls, such as gpiod_direction_output(). This patch firstly moves to utilise the new API and secondly removes the now superfluous call do set the direction. 
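As a rough before/after sketch of the API change (a hypothetical driver fragment, not the st-asc code itself; it assumes a struct device *dev, a child struct fwnode_handle *child and <linux/gpio/consumer.h>):

/* Hypothetical helper: request an RTS line from a child firmware node,
 * already configured as an output driven low. */
static struct gpio_desc *request_rts(struct device *dev,
				     struct fwnode_handle *child)
{
	/*
	 * Passing GPIOD_OUT_LOW requests the descriptor and sets the
	 * initial direction/level in one call, which is what the removed
	 * gpiod_direction_output(gpiod, 0) used to do separately.
	 */
	return devm_fwnode_get_gpiod_from_child(dev, "rts", child,
						GPIOD_OUT_LOW, "rts");
}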
Reported-by: Stephen Rothwell Suggested-by: Boris Brezillon Signed-off-by: Lee Jones [Also drop the header file dummies that only this driver was using] Acked-by: Greg Kroah-Hartman Signed-off-by: Linus Walleij --- drivers/tty/serial/st-asc.c | 11 ++++++----- include/linux/gpio/consumer.h | 16 ---------------- 2 files changed, 6 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c index bcf1d33e6ffe..c334bcc59c64 100644 --- a/drivers/tty/serial/st-asc.c +++ b/drivers/tty/serial/st-asc.c @@ -575,12 +575,13 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios, pinctrl_select_state(ascport->pinctrl, ascport->states[NO_HW_FLOWCTRL]); - gpiod = devm_get_gpiod_from_child(port->dev, "rts", - &np->fwnode); - if (!IS_ERR(gpiod)) { - gpiod_direction_output(gpiod, 0); + gpiod = devm_fwnode_get_gpiod_from_child(port->dev, + "rts", + &np->fwnode, + GPIOD_OUT_LOW, + np->name); + if (!IS_ERR(gpiod)) ascport->rts = gpiod; - } } } diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 2484b2fcc6eb..933d93656605 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -143,15 +143,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, struct fwnode_handle *child, enum gpiod_flags flags, const char *label); -/* FIXME: delete this helper when users are switched over */ -static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, - const char *con_id, struct fwnode_handle *child) -{ - return devm_fwnode_get_index_gpiod_from_child(dev, con_id, - 0, child, - GPIOD_ASIS, - "?"); -} #else /* CONFIG_GPIOLIB */ @@ -444,13 +435,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, return ERR_PTR(-ENOSYS); } -/* FIXME: delete this when all users are switched over */ -static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, - const char *con_id, struct fwnode_handle *child) -{ - return ERR_PTR(-ENOSYS); -} - #endif /* CONFIG_GPIOLIB */ static inline -- cgit v1.2.3 From 6e9f44eaaef0df7b846e9316fa9ca72a02025d44 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 9 Mar 2017 11:32:28 -0600 Subject: USB: serial: option: add Quectel UC15, UC20, EC21, and EC25 modems Add Quectel UC15, UC20, EC21, and EC25. The EC20 is handled by qcserial due to a USB VID/PID conflict with an existing Acer device. 
Signed-off-by: Dan Williams Cc: stable Signed-off-by: Johan Hovold --- drivers/usb/serial/option.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 42cc72e54c05..af67a0de6b5d 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -233,6 +233,14 @@ static void option_instat_callback(struct urb *urb); #define BANDRICH_PRODUCT_1012 0x1012 #define QUALCOMM_VENDOR_ID 0x05C6 +/* These Quectel products use Qualcomm's vendor ID */ +#define QUECTEL_PRODUCT_UC20 0x9003 +#define QUECTEL_PRODUCT_UC15 0x9090 + +#define QUECTEL_VENDOR_ID 0x2c7c +/* These Quectel products use Quectel's vendor ID */ +#define QUECTEL_PRODUCT_EC21 0x0121 +#define QUECTEL_PRODUCT_EC25 0x0125 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -1161,7 +1169,14 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ - { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */ + /* Quectel products using Qualcomm vendor ID */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + /* Quectel products using Quectel vendor ID */ + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, -- cgit v1.2.3 From da9a796f5475b4d3a339083af719982b7ab4a12b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 13 Mar 2017 16:59:57 +0000 Subject: drm/i915: Split GEM resetting into 3 phases Currently we do a reset prepare/finish around the call to reset the GPU, but it looks like we need a later stage after the hw has been reinitialised to allow GEM to restart itself. Start by splitting the 2 GEM phases into 3: prepare - before the reset, check if GEM recovered, then stop GEM reset - after the reset, update GEM bookkeeping finish - after the re-initialisation following the reset, restart GEM Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: http://patchwork.freedesktop.org/patch/msgid/20170208143033.11651-2-chris@chris-wilson.co.uk Link: http://patchwork.freedesktop.org/patch/msgid/20170313165958.13970-1-chris@chris-wilson.co.uk (cherry picked from commit d80270931314a88d79d9bd5e0a5df93c12196375) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_drv.c | 3 ++- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 7 ++++++- 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index e703556eba99..2093d203665d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1788,7 +1788,7 @@ void i915_reset(struct drm_i915_private *dev_priv) goto error; } - i915_gem_reset_finish(dev_priv); + i915_gem_reset(dev_priv); intel_overlay_reset(dev_priv); /* Ok, now get things going again... 
*/ @@ -1811,6 +1811,7 @@ void i915_reset(struct drm_i915_private *dev_priv) goto error; } + i915_gem_reset_finish(dev_priv); i915_queue_hangcheck(dev_priv); wakeup: diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7febe6eecf72..1d20c2d00f42 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3342,6 +3342,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error) } int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); +void i915_gem_reset(struct drm_i915_private *dev_priv); void i915_gem_reset_finish(struct drm_i915_private *dev_priv); void i915_gem_set_wedged(struct drm_i915_private *dev_priv); void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 10777da73039..27fe5a9315f0 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2834,7 +2834,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) engine->reset_hw(engine, request); } -void i915_gem_reset_finish(struct drm_i915_private *dev_priv) +void i915_gem_reset(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -2856,6 +2856,11 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv) } } +void i915_gem_reset_finish(struct drm_i915_private *dev_priv) +{ + lockdep_assert_held(&dev_priv->drm.struct_mutex); +} + static void nop_submit_request(struct drm_i915_gem_request *request) { dma_fence_set_error(&request->fence, -EIO); -- cgit v1.2.3 From 4565bf58d45b8aded94e446666839955cdb5030c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 13 Mar 2017 16:59:58 +0000 Subject: drm/i915: Disable engine->irq_tasklet around resets When we restart the engines, and we have active requests, a request on the first engine may complete and queue a request to the second engine before we try to restart the second engine. That queueing of the request may race with the engine restart, and so may corrupt the current state. Disabling the engine->irq_tasklet prevents the two paths from writing into ELSP simultaneously (and modifying the execlists_port[] at the same time). Include fixup 1d309634bcf4 ("drm/i915: Kill the tasklet then disable") Fixes: 821ed7df6e2a ("drm/i915: Update reset path to fix incomplete requests") Testcase: igt/gem_exec_fence/await-hang Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: http://patchwork.freedesktop.org/patch/msgid/20170208143033.11651-3-chris@chris-wilson.co.uk Link: http://patchwork.freedesktop.org/patch/msgid/20170313165958.13970-2-chris@chris-wilson.co.uk (cherry picked from commit 1f7b847d72c3583df5048d83bd945d0c2c524c28) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_gem.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 27fe5a9315f0..1051fdf37d20 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2719,7 +2719,16 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) for_each_engine(engine, dev_priv, id) { struct drm_i915_gem_request *request; + /* Prevent request submission to the hardware until we have + * completed the reset in i915_gem_reset_finish(). 
If a request + * is completed by one engine, it may then queue a request + * to a second via its engine->irq_tasklet *just* as we are + * calling engine->init_hw() and also writing the ELSP. + * Turning off the engine->irq_tasklet until the reset is over + * prevents the race. + */ tasklet_kill(&engine->irq_tasklet); + tasklet_disable(&engine->irq_tasklet); if (engine_stalled(engine)) { request = i915_gem_find_active_request(engine); @@ -2858,7 +2867,13 @@ void i915_gem_reset(struct drm_i915_private *dev_priv) void i915_gem_reset_finish(struct drm_i915_private *dev_priv) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + lockdep_assert_held(&dev_priv->drm.struct_mutex); + + for_each_engine(engine, dev_priv, id) + tasklet_enable(&engine->irq_tasklet); } static void nop_submit_request(struct drm_i915_gem_request *request) -- cgit v1.2.3 From 35a3abfd198e6c69a6644784bb09a2d951fc6b21 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 13 Mar 2017 17:02:31 +0000 Subject: drm/i915: Only enable hotplug interrupts if the display interrupts are enabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to prevent accessing the hpd registers outside of the display power wells, we should refrain from writing to the registers before the display interrupts are enabled. [ 4.740136] WARNING: CPU: 1 PID: 221 at drivers/gpu/drm/i915/intel_uncore.c:795 __unclaimed_reg_debug+0x44/0x50 [i915] [ 4.740155] Unclaimed read from register 0x1e1110 [ 4.740168] Modules linked in: i915(+) intel_gtt drm_kms_helper prime_numbers [ 4.740190] CPU: 1 PID: 221 Comm: systemd-udevd Not tainted 4.10.0-rc6+ #384 [ 4.740203] Hardware name: / , BIOS PYBSWCEL.86A.0027.2015.0507.1758 05/07/2015 [ 4.740220] Call Trace: [ 4.740236] dump_stack+0x4d/0x6f [ 4.740251] __warn+0xc1/0xe0 [ 4.740265] warn_slowpath_fmt+0x4a/0x50 [ 4.740281] ? insert_work+0x77/0xc0 [ 4.740355] ? fwtable_write32+0x90/0x130 [i915] [ 4.740431] __unclaimed_reg_debug+0x44/0x50 [i915] [ 4.740507] fwtable_read32+0xd8/0x130 [i915] [ 4.740575] i915_hpd_irq_setup+0xa5/0x100 [i915] [ 4.740649] intel_hpd_init+0x68/0x80 [i915] [ 4.740716] i915_driver_load+0xe19/0x1380 [i915] [ 4.740784] i915_pci_probe+0x32/0x90 [i915] [ 4.740799] pci_device_probe+0x8b/0xf0 [ 4.740815] driver_probe_device+0x2b6/0x450 [ 4.740828] __driver_attach+0xda/0xe0 [ 4.740841] ? driver_probe_device+0x450/0x450 [ 4.740853] bus_for_each_dev+0x5b/0x90 [ 4.740865] driver_attach+0x19/0x20 [ 4.740878] bus_add_driver+0x166/0x260 [ 4.740892] driver_register+0x5b/0xd0 [ 4.740906] ? 0xffffffffa0166000 [ 4.740920] __pci_register_driver+0x47/0x50 [ 4.740985] i915_init+0x5c/0x5e [i915] [ 4.740999] do_one_initcall+0x3e/0x160 [ 4.741015] ? __vunmap+0x7c/0xc0 [ 4.741029] ? kmem_cache_alloc+0xcf/0x120 [ 4.741045] do_init_module+0x55/0x1c4 [ 4.741060] load_module+0x1f3f/0x25b0 [ 4.741073] ? __symbol_put+0x40/0x40 [ 4.741086] ? 
kernel_read_file+0x100/0x190 [ 4.741100] SYSC_finit_module+0xbc/0xf0 [ 4.741112] SyS_finit_module+0x9/0x10 [ 4.741125] entry_SYSCALL_64_fastpath+0x17/0x98 [ 4.741135] RIP: 0033:0x7f8559a140f9 [ 4.741145] RSP: 002b:00007fff7509a3e8 EFLAGS: 00000246 ORIG_RAX: 0000000000000139 [ 4.741161] RAX: ffffffffffffffda RBX: 00007f855aba02d1 RCX: 00007f8559a140f9 [ 4.741172] RDX: 0000000000000000 RSI: 000055b6db0914f0 RDI: 0000000000000011 [ 4.741183] RBP: 0000000000020000 R08: 0000000000000000 R09: 000000000000000e [ 4.741193] R10: 0000000000000011 R11: 0000000000000246 R12: 000055b6db0854d0 [ 4.741204] R13: 000055b6db091150 R14: 0000000000000000 R15: 000055b6db035924 v2: Set dev_priv->display_irqs_enabled to true for all platforms other than vlv/chv that manually control the display power domain. Fixes: 19625e85c6ec ("drm/i915: Enable polling when we don't have hpd") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=97798 Suggested-by: Ville Syrjälä Signed-off-by: Chris Wilson Cc: Lyude Cc: Daniel Vetter Cc: Ville Syrjälä Cc: Hans de Goede Cc: stable@vger.kernel.org Reviewed-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170215131547.5064-1-chris@chris-wilson.co.uk Link: http://patchwork.freedesktop.org/patch/msgid/20170313170231.18633-1-chris@chris-wilson.co.uk (cherry picked from commit 262fd485ac6b476479f41f00bb104f6a1766ae66) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_irq.c | 10 ++++++++++ drivers/gpu/drm/i915/intel_hotplug.c | 14 ++++++++------ 2 files changed, 18 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e6ffef2f707a..4fc8973744b4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -4266,6 +4266,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (!IS_GEN2(dev_priv)) dev->vblank_disable_immediate = true; + /* Most platforms treat the display irq block as an always-on + * power domain. vlv/chv can disable it at runtime and need + * special care to avoid writing any of the display block registers + * outside of the power domain. We defer setting up the display irqs + * in this case to the runtime pm. + */ + dev_priv->display_irqs_enabled = true; + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + dev_priv->display_irqs_enabled = false; + dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index b62e3f8ad415..54208bef7a83 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -219,7 +219,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) } } } - if (dev_priv->display.hpd_irq_setup) + if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); @@ -425,7 +425,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, } } - if (storm_detected) + if (storm_detected && dev_priv->display_irqs_enabled) dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock(&dev_priv->irq_lock); @@ -471,10 +471,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) * Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked checks happy. 
*/ - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); + if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) { + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display_irqs_enabled) + dev_priv->display.hpd_irq_setup(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + } } static void i915_hpd_poll_init_work(struct work_struct *work) -- cgit v1.2.3 From 0f5418e564ac6452b9086295646e602a9addc4bf Mon Sep 17 00:00:00 2001 From: Kenneth Graunke Date: Mon, 13 Mar 2017 17:04:33 +0000 Subject: drm/i915: Drop support for I915_EXEC_CONSTANTS_* execbuf parameters. This patch makes the I915_PARAM_HAS_EXEC_CONSTANTS getparam return 0 (indicating the optional feature is not supported), and makes execbuf always return -EINVAL if the flags are used. Apparently, no userspace ever shipped which used this optional feature: I checked the git history of Mesa, xf86-video-intel, libva, and Beignet, and there were zero commits showing a use of these flags. Kernel commit 72bfa19c8deb4 apparently introduced the feature prematurely. According to Chris, the intention was to use this in cairo-drm, but "the use was broken for gen6", so I don't think it ever happened. 'relative_constants_mode' has always been tracked per-device, but this has actually been wrong ever since hardware contexts were introduced, as the INSTPM register is saved (and automatically restored) as part of the render ring context. The software per-device value could therefore get out of sync with the hardware per-context value. This meant that using them is actually unsafe: a client which tried to use them could damage the state of other clients, causing the GPU to interpret their BO offsets as absolute pointers, leading to bogus memory reads. These flags were also never ported to execlist mode, making them no-ops on Gen9+ (which requires execlists), and Gen8 in the default mode. On Gen8+, userspace can write these registers directly, achieving the same effect. On Gen6-7.5, it likely makes sense to extend the command parser to support them. I don't think anyone wants this on Gen4-5. Based on a patch by Dave Gordon. v3: Return -ENODEV for the getparam, as this is what we do for other obsolete features. Suggested by Chris Wilson. 
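[Editor's note, not part of the patch: the userspace-visible effect is that the parameter now probes as unsupported. A minimal sketch of how a client could check for it is below; it assumes the uapi header is installed as <drm/i915_drm.h> and uses only the getparam ioctl named in the commit.]

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int has_exec_constants(int drm_fd)
{
	struct drm_i915_getparam gp;
	int value = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_HAS_EXEC_CONSTANTS;
	gp.value = &value;

	/* The kernel now answers -ENODEV here, like other obsolete params. */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0)
		return 0;

	return value;
}
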
Cc: stable@vger.kernel.org Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=92448 Signed-off-by: Kenneth Graunke Reviewed-by: Joonas Lahtinen Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170215093446.21291-1-kenneth@whitecape.org Acked-by: Daniel Vetter Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170313170433.26843-1-chris@chris-wilson.co.uk (cherry picked from commit ef0f411f51475f4eebf9fc1b19a85be698af19ff) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_drv.c | 4 +-- drivers/gpu/drm/i915/i915_drv.h | 2 -- drivers/gpu/drm/i915/i915_gem.c | 2 -- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 52 ++---------------------------- 4 files changed, 3 insertions(+), 57 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 2093d203665d..6cd78bb2064d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -248,6 +248,7 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_IRQ_ACTIVE: case I915_PARAM_ALLOW_BATCHBUFFER: case I915_PARAM_LAST_DISPATCH: + case I915_PARAM_HAS_EXEC_CONSTANTS: /* Reject all old ums/dri params. */ return -ENODEV; case I915_PARAM_CHIPSET_ID: @@ -274,9 +275,6 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_BSD2: value = !!dev_priv->engine[VCS2]; break; - case I915_PARAM_HAS_EXEC_CONSTANTS: - value = INTEL_GEN(dev_priv) >= 4; - break; case I915_PARAM_HAS_LLC: value = HAS_LLC(dev_priv); break; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1d20c2d00f42..80be09831a52 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2064,8 +2064,6 @@ struct drm_i915_private { const struct intel_device_info info; - int relative_constants_mode; - void __iomem *regs; struct intel_uncore uncore; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1051fdf37d20..67b1fc5a0331 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4694,8 +4694,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv) init_waitqueue_head(&dev_priv->gpu_error.wait_queue); init_waitqueue_head(&dev_priv->gpu_error.reset_queue); - dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; - init_waitqueue_head(&dev_priv->pending_flip_queue); dev_priv->mm.interruptible = true; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d02cfaefe1c8..30e0675fd7da 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1408,10 +1408,7 @@ execbuf_submit(struct i915_execbuffer_params *params, struct drm_i915_gem_execbuffer2 *args, struct list_head *vmas) { - struct drm_i915_private *dev_priv = params->request->i915; u64 exec_start, exec_len; - int instp_mode; - u32 instp_mask; int ret; ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas); @@ -1422,56 +1419,11 @@ execbuf_submit(struct i915_execbuffer_params *params, if (ret) return ret; - instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; - instp_mask = I915_EXEC_CONSTANTS_MASK; - switch (instp_mode) { - case I915_EXEC_CONSTANTS_REL_GENERAL: - case I915_EXEC_CONSTANTS_ABSOLUTE: - case I915_EXEC_CONSTANTS_REL_SURFACE: - if (instp_mode != 0 && params->engine->id != RCS) { - DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); - return -EINVAL; - } - - if (instp_mode != 
dev_priv->relative_constants_mode) { - if (INTEL_INFO(dev_priv)->gen < 4) { - DRM_DEBUG("no rel constants on pre-gen4\n"); - return -EINVAL; - } - - if (INTEL_INFO(dev_priv)->gen > 5 && - instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) { - DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); - return -EINVAL; - } - - /* The HW changed the meaning on this bit on gen6 */ - if (INTEL_INFO(dev_priv)->gen >= 6) - instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; - } - break; - default: - DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode); + if (args->flags & I915_EXEC_CONSTANTS_MASK) { + DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n"); return -EINVAL; } - if (params->engine->id == RCS && - instp_mode != dev_priv->relative_constants_mode) { - struct intel_ring *ring = params->request->ring; - - ret = intel_ring_begin(params->request, 4); - if (ret) - return ret; - - intel_ring_emit(ring, MI_NOOP); - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(ring, INSTPM); - intel_ring_emit(ring, instp_mask << 16 | instp_mode); - intel_ring_advance(ring); - - dev_priv->relative_constants_mode = instp_mode; - } - if (args->flags & I915_EXEC_GEN7_SOL_RESET) { ret = i915_reset_gen7_sol_offsets(params->request); if (ret) -- cgit v1.2.3 From 8f68d591d4765b2e1ce9d916ac7bc5583285c4ad Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 13 Mar 2017 17:06:17 +0000 Subject: drm/i915: Stop using RP_DOWN_EI on Baytrail On Baytrail, we manually calculate busyness over the evaluation interval to avoid issues with miscalculations with RC6 enabled. However, it turns out that the DOWN_EI interrupt generator is completely bust - it operates in two modes, continuous or never, neither of which is conducive to good behaviour. Stop unmasking the DOWN_EI interrupt and just compute everything from the UP_EI which does seem to correspond to the desired interval. v2: Fixup gen6_rps_pm_mask() as well v3: Inline vlv_c0_above() to combine the now identical elapsed calculation for up/down and simplify the threshold testing Fixes: 43cf3bf084ba ("drm/i915: Improved w/a for rps on Baytrail") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: # v4.1+ Link: http://patchwork.freedesktop.org/patch/msgid/20170309211232.28878-1-chris@chris-wilson.co.uk Reviewed-by: Mika Kuoppala Link: http://patchwork.freedesktop.org/patch/msgid/20170313170617.31564-1-chris@chris-wilson.co.uk (cherry picked from commit e0e8c7cb6eb68e9256de2d8cbeb481d3701c05ac) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_irq.c | 73 ++++++++++++++++------------------------- drivers/gpu/drm/i915/intel_pm.c | 5 +-- 3 files changed, 32 insertions(+), 48 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 80be09831a52..1e53c31b6826 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1325,7 +1325,7 @@ struct intel_gen6_power_mgmt { unsigned boosts; /* manual wa residency calculations */ - struct intel_rps_ei up_ei, down_ei; + struct intel_rps_ei ei; /* * Protects RPS/RC6 register access and PCU communication. 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4fc8973744b4..b6c886ac901b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1046,68 +1046,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv, ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); } -static bool vlv_c0_above(struct drm_i915_private *dev_priv, - const struct intel_rps_ei *old, - const struct intel_rps_ei *now, - int threshold) -{ - u64 time, c0; - unsigned int mul = 100; - - if (old->cz_clock == 0) - return false; - - if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) - mul <<= 8; - - time = now->cz_clock - old->cz_clock; - time *= threshold * dev_priv->czclk_freq; - - /* Workload can be split between render + media, e.g. SwapBuffers - * being blitted in X after being rendered in mesa. To account for - * this we need to combine both engines into our activity counter. - */ - c0 = now->render_c0 - old->render_c0; - c0 += now->media_c0 - old->media_c0; - c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC; - - return c0 >= time; -} - void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) { - vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); - dev_priv->rps.up_ei = dev_priv->rps.down_ei; + memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei)); } static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) { + const struct intel_rps_ei *prev = &dev_priv->rps.ei; struct intel_rps_ei now; u32 events = 0; - if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) + if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) return 0; vlv_c0_read(dev_priv, &now); if (now.cz_clock == 0) return 0; - if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { - if (!vlv_c0_above(dev_priv, - &dev_priv->rps.down_ei, &now, - dev_priv->rps.down_threshold)) - events |= GEN6_PM_RP_DOWN_THRESHOLD; - dev_priv->rps.down_ei = now; - } + if (prev->cz_clock) { + u64 time, c0; + unsigned int mul; - if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { - if (vlv_c0_above(dev_priv, - &dev_priv->rps.up_ei, &now, - dev_priv->rps.up_threshold)) - events |= GEN6_PM_RP_UP_THRESHOLD; - dev_priv->rps.up_ei = now; + mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */ + if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) + mul <<= 8; + + time = now.cz_clock - prev->cz_clock; + time *= dev_priv->czclk_freq; + + /* Workload can be split between render + media, + * e.g. SwapBuffers being blitted in X after being rendered in + * mesa. To account for this we need to combine both engines + * into our activity counter. 
+ */ + c0 = now.render_c0 - prev->render_c0; + c0 += now.media_c0 - prev->media_c0; + c0 *= mul; + + if (c0 > time * dev_priv->rps.up_threshold) + events = GEN6_PM_RP_UP_THRESHOLD; + else if (c0 < time * dev_priv->rps.down_threshold) + events = GEN6_PM_RP_DOWN_THRESHOLD; } + dev_priv->rps.ei = now; return events; } @@ -4228,7 +4211,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) /* Let's track the enabled rps events */ if (IS_VALLEYVIEW(dev_priv)) /* WaGsvRC0ResidencyMethod:vlv */ - dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; + dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; else dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 940bab22d464..6a29784d2b41 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4928,8 +4928,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) { u32 mask = 0; + /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */ if (val > dev_priv->rps.min_freq_softlimit) - mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; + mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; if (val < dev_priv->rps.max_freq_softlimit) mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; @@ -5039,7 +5040,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv) { mutex_lock(&dev_priv->rps.hw_lock); if (dev_priv->rps.enabled) { - if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) + if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED) gen6_rps_reset_ei(dev_priv); I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); -- cgit v1.2.3 From abf8315f71dc5a2ee56fb60830dcb2861982dc91 Mon Sep 17 00:00:00 2001 From: Jyri Sarha Date: Tue, 31 Jan 2017 16:18:42 +0200 Subject: drm/tilcdc: Fix hardcoded fail-return value in tilcdc_crtc_create() Fix the badly hardcoded return value under the fail label. All goto branches to the label set the "ret"-variable accordingly. Signed-off-by: Jyri Sarha Reviewed-by: Gabriel Krisman Bertazi --- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index f80bf9385e41..abcbcd9f5851 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -1036,5 +1036,5 @@ int tilcdc_crtc_create(struct drm_device *dev) fail: tilcdc_crtc_destroy(crtc); - return -ENOMEM; + return ret; } -- cgit v1.2.3 From 11abbc9f39e002a2b25657e00abac8056cb39e93 Mon Sep 17 00:00:00 2001 From: Jyri Sarha Date: Wed, 1 Mar 2017 10:30:28 +0200 Subject: drm/tilcdc: Set framebuffer DMA address to HW only if CRTC is enabled Touching HW while clocks are off is a serious error and for instance breaks suspend functionality. After this patch tilcdc_crtc_update_fb() always updates the primary plane's framebuffer pointer, increases fb's reference count and stores vblank event. tilcdc_crtc_update_fb() only writes the fb's DMA address to HW if the crtc is enabled, as tilcdc_crtc_enable() takes care of writing the address on enable. This patch also refactors the tilcdc_crtc_update_fb() a bit. A number of subsequent small changes had made it almost unreadable. There should be no other functional changes but checking the CRTC's enable state. 
However, the locking goes a bit differently and some of the redundant checks have been removed in this new version. The enable_lock should be enough to protect the access to tilcdc_crtc->enabled. The irq_lock protects the access to last_vblank and next_fb. The check for vrefresh and last_vblank being valid is redundant, as the vrefresh should be always valid if the CRTC is enabled and now last_vblank should be too, because it is initialized to current time when CRTC raster is enabled. If for some reason the values are not correctly initialized the division by zero warning is quite appropriate. Signed-off-by: Jyri Sarha Reviewed-by: Tomi Valkeinen --- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index abcbcd9f5851..d745e8b50fb8 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); + unsigned long flags; WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); mutex_lock(&tilcdc_crtc->enable_lock); @@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc) tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY), LCDC_PALETTE_LOAD_MODE_MASK); + + /* There is no real chance for a race here as the time stamp + * is taken before the raster DMA is started. The spin-lock is + * taken to have a memory barrier after taking the time-stamp + * and to avoid a context switch between taking the stamp and + * enabling the raster. + */ + spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); + tilcdc_crtc->last_vblank = ktime_get(); tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE); + spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); drm_crtc_vblank_on(crtc); @@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown) } drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); - tilcdc_crtc->last_vblank = 0; tilcdc_crtc->enabled = false; mutex_unlock(&tilcdc_crtc->enable_lock); @@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc, { struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); struct drm_device *dev = crtc->dev; - unsigned long flags; WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); @@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc, drm_framebuffer_reference(fb); crtc->primary->fb = fb; + tilcdc_crtc->event = event; - spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); + mutex_lock(&tilcdc_crtc->enable_lock); - if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) { + if (tilcdc_crtc->enabled) { + unsigned long flags; ktime_t next_vblank; s64 tdiff; - next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, - 1000000 / crtc->hwmode.vrefresh); + spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); + next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, + 1000000 / crtc->hwmode.vrefresh); tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get())); if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US) tilcdc_crtc->next_fb = fb; - } - - if (tilcdc_crtc->next_fb != fb) - set_scanout(crtc, fb); + else + set_scanout(crtc, fb); - tilcdc_crtc->event = event; + spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); + } - spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); + mutex_unlock(&tilcdc_crtc->enable_lock); 
return 0; } -- cgit v1.2.3 From 1b2e5ea0b7061be3ffdcd85918c2f428edace4ba Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 12 Feb 2017 17:19:59 +0000 Subject: drm/i915: Always call i915_gem_reset_finish() following i915_gem_reset_prepare() As i915_gem_reset_finish() undoes the steps from i915_gem_reset_prepare() to leave the system in a fully-working state, e.g. to be able to free the breadcrumb signal threads, make sure that we always call it even on the error path. Fixes: da9a796f5475 ("drm/i915: Split GEM resetting into 3 phases") Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170212172002.23072-2-chris@chris-wilson.co.uk Reviewed-by: Mika Kuoppala (cherry picked from commit 8d613c539c74fa9055f88f4116196d7c820bd98f) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6cd78bb2064d..1c75402a59c1 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1809,10 +1809,10 @@ void i915_reset(struct drm_i915_private *dev_priv) goto error; } - i915_gem_reset_finish(dev_priv); i915_queue_hangcheck(dev_priv); wakeup: + i915_gem_reset_finish(dev_priv); enable_irq(dev_priv->drm.irq); wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS); return; -- cgit v1.2.3 From 16681037e75ce08f2980ac5dbb03414429c7a55d Mon Sep 17 00:00:00 2001 From: Anssi Hannula Date: Mon, 13 Feb 2017 14:06:10 +0200 Subject: mmc: sdhci-of-arasan: fix incorrect timeout clock sdhci_arasan_get_timeout_clock() divides the frequency it has with (1 << (13 + divisor)). However, the divisor is not some Arasan-specific value, but instead is just the Data Timeout Counter Value from the SDHCI Timeout Control Register. Applying it here like this is wrong as the sdhci driver already takes that value into account when calculating timeouts, and in fact it *sets* that register value based on how long a timeout is wanted. Additionally, sdhci core interprets the .get_timeout_clock callback return value as if it were read from hardware registers, i.e. the unit should be kHz or MHz depending on SDHCI_TIMEOUT_CLK_UNIT capability bit. This bit is set at least on the tested Zynq-7000 SoC. With the tested hardware (SDHCI_TIMEOUT_CLK_UNIT set) this results in too high a timeout clock rate being reported, causing the core to use longer-than-needed timeouts. Additionally, on a partitioned MMC (therefore having erase_group_def bit set) mmc_calc_max_discard() disables discard support as it looks like controller does not support the long timeouts needed for that. Do not apply the extra divisor and return the timeout clock in the expected unit. Tested with a Zynq-7000 SoC and a partitioned Toshiba THGBMAG5A1JBAWR eMMC card. 
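[Editor's note, not part of the patch: a worked example of the unit handling described above. With a 100 MHz controller clock the callback should report 100000 when the core expects kHz, or 100 when SDHCI_TIMEOUT_CLK_UNIT indicates MHz; the old code instead divided the rate down by at least 2^13. A self-contained sketch of the corrected conversion, with DIV_ROUND_UP written out so it stands alone:]

static unsigned int example_timeout_clock(unsigned long clk_rate_hz,
					  int timeout_clk_unit_is_mhz)
{
	/* report in kHz by default: 100000000 Hz -> 100000 */
	unsigned int freq = (clk_rate_hz + 999) / 1000;

	/* SDHCI_TIMEOUT_CLK_UNIT set: report in MHz instead: -> 100 */
	if (timeout_clk_unit_is_mhz)
		freq = (freq + 999) / 1000;

	return freq;
}
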
Signed-off-by: Anssi Hannula Fixes: e3ec3a3d11ad ("mmc: arasan: Add driver for Arasan SDHCI") Cc: Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-of-arasan.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index 410a55b1c25f..1cfd7f900339 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -28,13 +28,9 @@ #include "sdhci-pltfm.h" #include -#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c #define SDHCI_ARASAN_VENDOR_REGISTER 0x78 #define VENDOR_ENHANCED_STROBE BIT(0) -#define CLK_CTRL_TIMEOUT_SHIFT 16 -#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT) -#define CLK_CTRL_TIMEOUT_MIN_EXP 13 #define PHY_CLK_TOO_SLOW_HZ 400000 @@ -163,15 +159,15 @@ static int sdhci_arasan_syscon_write(struct sdhci_host *host, static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host) { - u32 div; unsigned long freq; struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET); - div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT; + /* SDHCI timeout clock is in kHz */ + freq = DIV_ROUND_UP(clk_get_rate(pltfm_host->clk), 1000); - freq = clk_get_rate(pltfm_host->clk); - freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div); + /* or in MHz */ + if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) + freq = DIV_ROUND_UP(freq, 1000); return freq; } -- cgit v1.2.3 From 9c31b087348cb2b5e668261f2eee2f224b3780b5 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 13 Feb 2017 19:58:18 +0200 Subject: drm/i915: Reject HDMI 12bpc if the sink doesn't indicate support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Check that the sink really declared 12bpc support before we enable it. This should never actually happen since it's mandatory for HDMI sinks to support 12bpc if they support any deep color modes. But reality disagrees with the theory and there are actually sinks in the wild that violate the spec. v2: Fix the output_types check Update commit message to state that these things are in fact real Cc: stable@vger.kernel.org Cc: Nicholas Sielicki Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=99250 Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170213175818.24958-1-ville.syrjala@linux.intel.com Reviewed-by: Shashank Sharma (cherry picked from commit c750bdd3e7e204cc88b32806c3864487a03cd84b) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_hdmi.c | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ebae2bd83918..24b2fa5b6282 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1298,16 +1298,34 @@ intel_hdmi_mode_valid(struct drm_connector *connector, static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc_state->base.crtc->dev; + struct drm_i915_private *dev_priv = + to_i915(crtc_state->base.crtc->dev); + struct drm_atomic_state *state = crtc_state->base.state; + struct drm_connector_state *connector_state; + struct drm_connector *connector; + int i; - if (HAS_GMCH_DISPLAY(to_i915(dev))) + if (HAS_GMCH_DISPLAY(dev_priv)) return false; /* * HDMI 12bpc affects the clocks, so it's only possible * when not cloning with other encoder types. 
*/ - return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI; + if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI) + return false; + + for_each_connector_in_state(state, connector, connector_state, i) { + const struct drm_display_info *info = &connector->display_info; + + if (connector_state->crtc != crtc_state->base.crtc) + continue; + + if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0) + return false; + } + + return true; } bool intel_hdmi_compute_config(struct intel_encoder *encoder, -- cgit v1.2.3 From 773dc118756b1f38766063e90e582016be868f09 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 1 Mar 2017 14:11:47 -0800 Subject: mmc: core: Fix access to HS400-ES devices HS400-ES devices fail to initialize with the following error messages. mmc1: power class selection to bus width 8 ddr 0 failed mmc1: error -110 whilst initialising MMC card This was seen on Samsung Chromebook Plus. Code analysis points to commit 3d4ef329757c ("mmc: core: fix multi-bit bus width without high-speed mode"), which attempts to set the bus width for all but HS200 devices unconditionally. However, for HS400-ES, the bus width is already selected. Cc: Anssi Hannula Cc: Douglas Anderson Cc: Brian Norris Fixes: 3d4ef329757c ("mmc: core: fix multi-bit bus width ...") Signed-off-by: Guenter Roeck Reviewed-by: Douglas Anderson Reviewed-by: Shawn Lin Tested-by: Heiko Stuebner Cc: Signed-off-by: Ulf Hansson --- drivers/mmc/core/mmc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 7fd722868875..b502601df228 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1730,7 +1730,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, err = mmc_select_hs400(card); if (err) goto free_card; - } else { + } else if (!mmc_card_hs400es(card)) { /* Select the desired bus width optionally */ err = mmc_select_bus_width(card); if (err > 0 && mmc_card_hs(card)) { -- cgit v1.2.3 From 2602b740e45cc64feb55d5a9ee8db744ab3becbb Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Mon, 13 Mar 2017 14:36:32 +0200 Subject: mmc: block: Fix is_waiting_last_req set incorrectly Commit 15520111500c ("mmc: core: Further fix thread wake-up") allowed a queue to release the host with is_waiting_last_req set to true. A queue waiting to claim the host will not reset it, which can result in the queue getting stuck in a loop. Fixes: 15520111500c ("mmc: core: Further fix thread wake-up") Signed-off-by: Adrian Hunter Cc: stable@vger.kernel.org # v4.10+ Signed-off-by: Ulf Hansson --- drivers/mmc/core/block.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 1621fa08e206..e59107ca512a 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1817,6 +1817,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) mmc_blk_issue_flush(mq, req); } else { mmc_blk_issue_rw_rq(mq, req); + card->host->context_info.is_waiting_last_req = false; } out: -- cgit v1.2.3 From 8ecc34448e24e9e8a8f3a9b37be70b43c6af5288 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Mon, 13 Mar 2017 14:36:33 +0200 Subject: mmc: block: Fix cmd error reset failure path Commit 4e1f780032c5 ("mmc: block: break out mmc_blk_rw_cmd_abort()") assumed the request had not completed, but in one case it had. Fix that. 
Fixes: 4e1f780032c5 ("mmc: block: break out mmc_blk_rw_cmd_abort()") Signed-off-by: Adrian Hunter Reviewed-by: Linus Walleij Signed-off-by: Ulf Hansson --- drivers/mmc/core/block.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index e59107ca512a..05afefcfb611 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1701,7 +1701,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) case MMC_BLK_CMD_ERR: req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending); if (mmc_blk_reset(md, card->host, type)) { - mmc_blk_rw_cmd_abort(card, old_req); + if (req_pending) + mmc_blk_rw_cmd_abort(card, old_req); mmc_blk_rw_try_restart(mq, new_req); return; } -- cgit v1.2.3 From 0977762f6d15f13caccc20d71a5dec47d098907d Mon Sep 17 00:00:00 2001 From: Song Liu Date: Mon, 13 Mar 2017 13:44:35 -0700 Subject: md/r5cache: fix set_syndrome_sources() for data in cache Before this patch, device InJournal will be included in prexor (SYNDROME_SRC_WANT_DRAIN) but not in reconstruct (SYNDROME_SRC_WRITTEN). So it will break parity calculation. With srctype == SYNDROME_SRC_WRITTEN, we need include both dev with non-null ->written and dev with R5_InJournal. This fixes logic in 1e6d690(md/r5cache: caching phase of r5cache) Cc: stable@vger.kernel.org (v4.10+) Signed-off-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 6bfedfcf41c1..ed5cd705b985 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1401,7 +1401,8 @@ static int set_syndrome_sources(struct page **srcs, (test_bit(R5_Wantdrain, &dev->flags) || test_bit(R5_InJournal, &dev->flags))) || (srctype == SYNDROME_SRC_WRITTEN && - dev->written)) { + (dev->written || + test_bit(R5_InJournal, &dev->flags)))) { if (test_bit(R5_InJournal, &dev->flags)) srcs[slot] = sh->dev[i].orig_page; else -- cgit v1.2.3 From 8c53ad2139137dd4bf506a2c2b888de3816e8f75 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 13 Mar 2017 15:14:08 +0800 Subject: drm/amd/powerplay: fix copy error in smu7_clockpoweragting.c Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index 8cf71f3c6d0e..261b828ad590 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -178,7 +178,7 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) if (bgate) { cgs_set_powergating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); + AMD_PG_STATE_GATE); cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_GATE); -- cgit v1.2.3 From 11353b9d10392e79e32603d2178e75feb25eaf0d Mon Sep 17 00:00:00 2001 From: Zhilong Liu Date: Tue, 14 Mar 2017 15:52:26 +0800 Subject: md/raid1: fix a trivial typo in comments raid1.c: fix a trivial typo in comments of freeze_array(). 
Cc: Jack Wang Cc: Guoqing Jiang Cc: John Stoffel Acked-by: Coly Li Signed-off-by: Zhilong Liu Signed-off-by: Shaohua Li --- drivers/md/raid1.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index c33e96e33b8e..a34f58772022 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1027,7 +1027,7 @@ static int get_unqueued_pending(struct r1conf *conf) static void freeze_array(struct r1conf *conf, int extra) { /* Stop sync I/O and normal I/O and wait for everything to - * go quite. + * go quiet. * This is called in two situations: * 1) management command handlers (reshape, remove disk, quiesce). * 2) one normal I/O request failed. -- cgit v1.2.3 From 655d9ca9ac075da1ef2a45012ba48a39f6eb1f58 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 14 Mar 2017 22:27:11 +0100 Subject: drm: amd: remove broken include path The AMD ACP driver adds "-I../acp -I../acp/include" to the gcc command line, which makes no sense, since these are evaluated relative to the build directory. When we build with "make W=1", they instead cause a warning: cc1: error: ../acp/: No such file or directory [-Werror=missing-include-dirs] cc1: error: ../acp/include: No such file or directory [-Werror=missing-include-dirs] cc1: all warnings being treated as errors ../scripts/Makefile.build:289: recipe for target 'drivers/gpu/drm/amd/amdgpu/amdgpu_drv.o' failed ../scripts/Makefile.build:289: recipe for target 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.o' failed ../scripts/Makefile.build:289: recipe for target 'drivers/gpu/drm/amd/amdgpu/amdgpu_kms.o' failed This removes the subdir-ccflags variable that evidently did not serve any purpose here. Signed-off-by: Arnd Bergmann Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/acp/Makefile | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile index 8363cb57915b..8a08e81ee90d 100644 --- a/drivers/gpu/drm/amd/acp/Makefile +++ b/drivers/gpu/drm/amd/acp/Makefile @@ -3,6 +3,4 @@ # of AMDSOC/AMDGPU drm driver. # It provides the HW control for ACP related functionalities. -subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include - AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o -- cgit v1.2.3 From e67351d56a709853046fbe652b981fb7ca4c3dcc Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Wed, 15 Mar 2017 11:57:47 +0200 Subject: drm/i915/glk: Remove MODULE_FIRMWARE() tag from Geminilake's DMC Geminilake's DMC is not yet available in the linux-firmware repository. To prevent userspace tools such as mkinitramfs to complain about missing firmware, remove the MODULE_FIRMWARE() tag for now. 
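[Editor's note, not part of the patch: MODULE_FIRMWARE() only records a "firmware:" hint in the module's modinfo section, which is what initramfs tooling scans; runtime loading still goes through request_firmware(), so dropping the tag does not break DMC loading once the blob is available. A hedged sketch of the usual pairing; the firmware name is taken from the driver, while the surrounding function is invented for illustration:]

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

#define EXAMPLE_DMC_FW "i915/glk_dmc_ver1_01.bin"	/* name from the driver */

static int example_load_dmc(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, EXAMPLE_DMC_FW, dev);	/* runtime lookup */
	if (ret)
		return ret;

	/* ... validate and program the DMC payload here ... */

	release_firmware(fw);
	return 0;
}

/* MODULE_FIRMWARE(EXAMPLE_DMC_FW); -- the modinfo hint this patch drops
 * until the blob lands in linux-firmware.
 */
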
Fixes: dbb28b5c3d3c ("drm/i915/DMC/GLK: Load DMC on GLK") Cc: Rodrigo Vivi Cc: Anusha Srivatsa Cc: Daniel Vetter Cc: Jani Nikula Cc: intel-gfx@lists.freedesktop.org Cc: Signed-off-by: Ander Conselvan de Oliveira Acked-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20170306085651.14008-1-ander.conselvan.de.oliveira@intel.com Link: http://patchwork.freedesktop.org/patch/msgid/20170315095747.21845-1-ander.conselvan.de.oliveira@intel.com (cherry picked from commit d9321a03efcda867b3a8c6327e01808516f0acd7) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_csr.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 0085bc745f6a..de219b71fb76 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -35,7 +35,6 @@ */ #define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin" -MODULE_FIRMWARE(I915_CSR_GLK); #define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) #define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin" -- cgit v1.2.3 From e609ccef5222c73b46b322be7d3796d60bff353d Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Fri, 24 Feb 2017 16:04:15 +0200 Subject: intel_th: Don't leak module refcount on failure to activate Output 'activation' may fail for the reasons of the output driver, for example, if msc's buffer is not allocated. We forget, however, to drop the module reference in this case. So each attempt at activation in this case leaks a reference, preventing the module from ever unloading. This patch adds the missing module_put() in the activation error path. Signed-off-by: Alexander Shishkin Cc: stable@vger.kernel.org # v4.8+ --- drivers/hwtracing/intel_th/core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index cdd9b3b26195..7563eceeaaea 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -221,8 +221,10 @@ static int intel_th_output_activate(struct intel_th_device *thdev) else intel_th_trace_enable(thdev); - if (ret) + if (ret) { pm_runtime_put(&thdev->dev); + module_put(thdrv->driver.owner); + } return ret; } -- cgit v1.2.3 From 5118ccd34780f4637a9360be580f41f4c1feab48 Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Tue, 8 Sep 2015 14:03:55 +0300 Subject: intel_th: pci: Add Denverton SOC support This adds Intel(R) Trace Hub PCI ID for Denverton SOC. Signed-off-by: Alexander Shishkin --- drivers/hwtracing/intel_th/pci.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 0bba3842336e..04bd57b0100a 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -85,6 +85,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6), .driver_data = (kernel_ulong_t)0, }, + { + /* Denverton */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1), + .driver_data = (kernel_ulong_t)0, + }, { 0 }, }; -- cgit v1.2.3 From 340837f985c2cb87ca0868d4aa9ce42b0fab3a21 Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Thu, 30 Jun 2016 16:10:51 +0300 Subject: intel_th: pci: Add Gemini Lake support This adds Intel(R) Trace Hub PCI ID for Gemini Lake SOC. 
Signed-off-by: Alexander Shishkin --- drivers/hwtracing/intel_th/pci.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 04bd57b0100a..590cf90dd21a 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -90,6 +90,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1), .driver_data = (kernel_ulong_t)0, }, + { + /* Gemini Lake */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e), + .driver_data = (kernel_ulong_t)0, + }, { 0 }, }; -- cgit v1.2.3 From e4c204ced0ac25e02e58679f07096c5bac0b0d96 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 14 Mar 2017 16:18:34 +0100 Subject: cpufreq: intel_pstate: Avoid percentages in limits-related computations Currently, intel_pstate_update_perf_limits() first converts the policy minimum and maximum limits into percentages of the maximum turbo frequency (rounding up to an integer) and then converts these percentages to fractions (by using fixed-point arithmetic to divide them by 100). That introduces a rounding error unnecessarily, because the fractions can be obtained by carrying out fixed-point divisions directly on the input numbers. Rework the computations in intel_pstate_hwp_set() to use fractions instead of percentages (and drop redundant local variables from there) and modify intel_pstate_update_perf_limits() to compute the fractions directly and percentages out of them. While at it, introduce percent_ext_fp() for converting percentages to fractions (with extended number of fraction bits) and use it in the computations. Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 56 +++++++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index ee12641ee010..08e134ffba68 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -84,6 +84,11 @@ static inline u64 div_ext_fp(u64 x, u64 y) return div64_u64(x << EXT_FRAC_BITS, y); } +static inline int32_t percent_ext_fp(int percent) +{ + return div_ext_fp(percent, 100); +} + /** * struct sample - Store performance sample * @core_avg_perf: Ratio of APERF/MPERF which is the actual average @@ -850,7 +855,6 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy) u64 value, cap; for_each_cpu(cpu, policy->cpus) { - int max_perf_pct, min_perf_pct; struct cpudata *cpu_data = all_cpu_data[cpu]; s16 epp; @@ -864,16 +868,14 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy) else hw_max = HWP_HIGHEST_PERF(cap); - max_perf_pct = perf_limits->max_perf_pct; - min_perf_pct = perf_limits->min_perf_pct; - min = hw_max * min_perf_pct / 100; + min = fp_ext_toint(hw_max * perf_limits->min_perf); rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); value &= ~HWP_MIN_PERF(~0L); value |= HWP_MIN_PERF(min); - max = hw_max * max_perf_pct / 100; + max = fp_ext_toint(hw_max * perf_limits->max_perf); value &= ~HWP_MAX_PERF(~0L); value |= HWP_MAX_PERF(max); @@ -1223,7 +1225,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, limits->max_perf_pct); limits->max_perf_pct = max(limits->min_perf_pct, limits->max_perf_pct); - limits->max_perf = div_ext_fp(limits->max_perf_pct, 100); + limits->max_perf = percent_ext_fp(limits->max_perf_pct); intel_pstate_update_policies(); @@ -1260,7 +1262,7 @@ static ssize_t store_min_perf_pct(struct 
kobject *a, struct attribute *b, limits->min_perf_pct); limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); - limits->min_perf = div_ext_fp(limits->min_perf_pct, 100); + limits->min_perf = percent_ext_fp(limits->min_perf_pct); intel_pstate_update_policies(); @@ -2078,36 +2080,34 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu) static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, struct perf_limits *limits) { + int32_t max_policy_perf, min_policy_perf; - limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100, - policy->cpuinfo.max_freq); - limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100); + max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq); + max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1)); if (policy->max == policy->min) { - limits->min_policy_pct = limits->max_policy_pct; + min_policy_perf = max_policy_perf; } else { - limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100, - policy->cpuinfo.max_freq); - limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, - 0, 100); + min_policy_perf = div_ext_fp(policy->min, + policy->cpuinfo.max_freq); + min_policy_perf = clamp_t(int32_t, min_policy_perf, + 0, max_policy_perf); } - /* Normalize user input to [min_policy_pct, max_policy_pct] */ - limits->min_perf_pct = max(limits->min_policy_pct, - limits->min_sysfs_pct); - limits->min_perf_pct = min(limits->max_policy_pct, - limits->min_perf_pct); - limits->max_perf_pct = min(limits->max_policy_pct, - limits->max_sysfs_pct); - limits->max_perf_pct = max(limits->min_policy_pct, - limits->max_perf_pct); + /* Normalize user input to [min_perf, max_perf] */ + limits->min_perf = max(min_policy_perf, + percent_ext_fp(limits->min_sysfs_pct)); + limits->min_perf = min(limits->min_perf, max_policy_perf); + limits->max_perf = min(max_policy_perf, + percent_ext_fp(limits->max_sysfs_pct)); + limits->max_perf = max(min_policy_perf, limits->max_perf); - /* Make sure min_perf_pct <= max_perf_pct */ - limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); + /* Make sure min_perf <= max_perf */ + limits->min_perf = min(limits->min_perf, limits->max_perf); - limits->min_perf = div_ext_fp(limits->min_perf_pct, 100); - limits->max_perf = div_ext_fp(limits->max_perf_pct, 100); limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS); limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS); + limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100); + limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100); pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu, limits->max_perf_pct, limits->min_perf_pct); -- cgit v1.2.3 From 85b29008d8af6d94a0723aaa8d93cfb6e041158b Mon Sep 17 00:00:00 2001 From: Don Brace Date: Fri, 10 Mar 2017 14:35:11 -0600 Subject: scsi: hpsa: update check for logical volume status - Add in a new case for volume offline. Resolves internal testing bug for multilun array management. - Return correct status for failed TURs. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hpsa.c | 35 ++++++++++++++++------------------- drivers/scsi/hpsa_cmd.h | 2 ++ 2 files changed, 18 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 524a0c755ed7..90b76c4c6d36 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3714,7 +3714,7 @@ exit_failed: * # (integer code indicating one of several NOT READY states * describing why a volume is to be kept offline) */ -static int hpsa_volume_offline(struct ctlr_info *h, +static unsigned char hpsa_volume_offline(struct ctlr_info *h, unsigned char scsi3addr[]) { struct CommandList *c; @@ -3735,7 +3735,7 @@ static int hpsa_volume_offline(struct ctlr_info *h, DEFAULT_TIMEOUT); if (rc) { cmd_free(h, c); - return 0; + return HPSA_VPD_LV_STATUS_UNSUPPORTED; } sense = c->err_info->SenseInfo; if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) @@ -3746,19 +3746,13 @@ static int hpsa_volume_offline(struct ctlr_info *h, cmd_status = c->err_info->CommandStatus; scsi_status = c->err_info->ScsiStatus; cmd_free(h, c); - /* Is the volume 'not ready'? */ - if (cmd_status != CMD_TARGET_STATUS || - scsi_status != SAM_STAT_CHECK_CONDITION || - sense_key != NOT_READY || - asc != ASC_LUN_NOT_READY) { - return 0; - } /* Determine the reason for not ready state */ ldstat = hpsa_get_volume_status(h, scsi3addr); /* Keep volume offline in certain cases: */ switch (ldstat) { + case HPSA_LV_FAILED: case HPSA_LV_UNDERGOING_ERASE: case HPSA_LV_NOT_AVAILABLE: case HPSA_LV_UNDERGOING_RPI: @@ -3780,7 +3774,7 @@ static int hpsa_volume_offline(struct ctlr_info *h, default: break; } - return 0; + return HPSA_LV_OK; } /* @@ -3853,10 +3847,10 @@ static int hpsa_update_device_info(struct ctlr_info *h, /* Do an inquiry to the device to see what it is. 
*/ if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { - /* Inquiry failed (msg printed already) */ dev_err(&h->pdev->dev, - "hpsa_update_device_info: inquiry failed\n"); - rc = -EIO; + "%s: inquiry failed, device will be skipped.\n", + __func__); + rc = HPSA_INQUIRY_FAILED; goto bail_out; } @@ -3885,15 +3879,19 @@ static int hpsa_update_device_info(struct ctlr_info *h, if ((this_device->devtype == TYPE_DISK || this_device->devtype == TYPE_ZBC) && is_logical_dev_addr_mode(scsi3addr)) { - int volume_offline; + unsigned char volume_offline; hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) hpsa_get_ioaccel_status(h, scsi3addr, this_device); volume_offline = hpsa_volume_offline(h, scsi3addr); - if (volume_offline < 0 || volume_offline > 0xff) - volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; - this_device->volume_offline = volume_offline & 0xff; + if (volume_offline == HPSA_LV_FAILED) { + rc = HPSA_LV_FAILED; + dev_err(&h->pdev->dev, + "%s: LV failed, device will be skipped.\n", + __func__); + goto bail_out; + } } else { this_device->raid_level = RAID_UNKNOWN; this_device->offload_config = 0; @@ -4379,8 +4377,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) goto out; } if (rc) { - dev_warn(&h->pdev->dev, - "Inquiry failed, skipping device.\n"); + h->drv_req_rescan = 1; continue; } diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index a584cdf07058..5961705eef76 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -156,6 +156,7 @@ #define CFGTBL_BusType_Fibre2G 0x00000200l /* VPD Inquiry types */ +#define HPSA_INQUIRY_FAILED 0x02 #define HPSA_VPD_SUPPORTED_PAGES 0x00 #define HPSA_VPD_LV_DEVICE_ID 0x83 #define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1 @@ -166,6 +167,7 @@ /* Logical volume states */ #define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff #define HPSA_LV_OK 0x0 +#define HPSA_LV_FAILED 0x01 #define HPSA_LV_NOT_AVAILABLE 0x0b #define HPSA_LV_UNDERGOING_ERASE 0x0F #define HPSA_LV_UNDERGOING_RPI 0x12 -- cgit v1.2.3 From 87b9e6aa87d9411f1059aa245c0c79976bc557ac Mon Sep 17 00:00:00 2001 From: Don Brace Date: Fri, 10 Mar 2017 14:35:17 -0600 Subject: scsi: hpsa: limit outstanding rescans Avoid rescan storms. No need to queue another if one is pending. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Reviewed-by: Tomas Henzl Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.c | 16 +++++++++++++++- drivers/scsi/hpsa.h | 1 + 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 90b76c4c6d36..0a8ac68f4ca1 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -5555,7 +5555,7 @@ static void hpsa_scan_complete(struct ctlr_info *h) spin_lock_irqsave(&h->scan_lock, flags); h->scan_finished = 1; - wake_up_all(&h->scan_wait_queue); + wake_up(&h->scan_wait_queue); spin_unlock_irqrestore(&h->scan_lock, flags); } @@ -5573,11 +5573,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh) if (unlikely(lockup_detected(h))) return hpsa_scan_complete(h); + /* + * If a scan is already waiting to run, no need to add another + */ + spin_lock_irqsave(&h->scan_lock, flags); + if (h->scan_waiting) { + spin_unlock_irqrestore(&h->scan_lock, flags); + return; + } + + spin_unlock_irqrestore(&h->scan_lock, flags); + /* wait until any scan already in progress is finished. 
*/ while (1) { spin_lock_irqsave(&h->scan_lock, flags); if (h->scan_finished) break; + h->scan_waiting = 1; spin_unlock_irqrestore(&h->scan_lock, flags); wait_event(h->scan_wait_queue, h->scan_finished); /* Note: We don't need to worry about a race between this @@ -5587,6 +5599,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh) */ } h->scan_finished = 0; /* mark scan as in progress */ + h->scan_waiting = 0; spin_unlock_irqrestore(&h->scan_lock, flags); if (unlikely(lockup_detected(h))) @@ -8789,6 +8802,7 @@ reinit_after_soft_reset: init_waitqueue_head(&h->event_sync_wait_queue); mutex_init(&h->reset_mutex); h->scan_finished = 1; /* no scan currently in progress */ + h->scan_waiting = 0; pci_set_drvdata(pdev, h); h->ndevices = 0; diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index bf6cdc106654..6f04f2ad4125 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -201,6 +201,7 @@ struct ctlr_info { dma_addr_t errinfo_pool_dhandle; unsigned long *cmd_pool_bits; int scan_finished; + u8 scan_waiting : 1; spinlock_t scan_lock; wait_queue_head_t scan_wait_queue; -- cgit v1.2.3 From 2ef2884980873081a4edae92f9d88dd580c85f6e Mon Sep 17 00:00:00 2001 From: Don Brace Date: Fri, 10 Mar 2017 14:35:23 -0600 Subject: scsi: hpsa: do not timeout reset operations Resets can take longer than DEFAULT_TIMEOUT. Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Reviewed-by: Tomas Henzl Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 0a8ac68f4ca1..0d0be7754a65 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -2956,7 +2956,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, /* fill_cmd can't fail here, no data buffer to map. */ (void) fill_cmd(c, reset_type, h, NULL, 0, 0, scsi3addr, TYPE_MSG); - rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); if (rc) { dev_warn(&h->pdev->dev, "Failed to send reset command\n"); goto out; -- cgit v1.2.3 From 949d7fa158b2b1af533bdb1af0dda8ab103ac58d Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Sun, 12 Mar 2017 12:22:02 +0200 Subject: scsi: ufs: don't check unsigned type for a negative value Fix compilation warning: drivers/scsi/ufs/ufshcd.c:7645:13: warning: comparison of unsigned expression < 0 is always false [-Wtype-limits] if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX)) Signed-off-by: Tomas Winkler Reviewed-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 1359913bf840..e8c26e6e6237 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -7642,7 +7642,7 @@ static inline ssize_t ufshcd_pm_lvl_store(struct device *dev, if (kstrtoul(buf, 0, &value)) return -EINVAL; - if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX)) + if (value >= UFS_PM_LVL_MAX) return -EINVAL; spin_lock_irqsave(hba->host->host_lock, flags); -- cgit v1.2.3 From 7d7080335f8d93a51e8238b6e85be8af4ba452b6 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 8 Mar 2017 14:36:01 -0800 Subject: scsi: lpfc: Finalize Kconfig options for nvme Reviewing the result of what was just added for Kconfig, we made a poor choice. 
It worked well for full kernel builds, but not so much for how it would be deployed on a distro. Here's the final result: - lpfc will compile in NVME initiator and/or NVME target support based on whether the kernel has the corresponding subsystem support. Kconfig is not used to drive this specifically for lpfc. - There is a module parameter, lpfc_enable_fc4_type, that indicates whether the ports will do FCP-only or FCP & NVME (NVME-only not yet possible due to dependency on fc transport). As FCP & NVME divvys up exchange resources, and given NVME will not be often initially, the default is changed to FCP only. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. Petersen --- drivers/scsi/Kconfig | 14 -------------- drivers/scsi/lpfc/lpfc_attr.c | 4 ++-- drivers/scsi/lpfc/lpfc_init.c | 7 +++++++ drivers/scsi/lpfc/lpfc_nvme.c | 8 ++++---- drivers/scsi/lpfc/lpfc_nvmet.c | 8 ++++---- 5 files changed, 17 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 4bf55b5d78be..3c52867dfe28 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1253,20 +1253,6 @@ config SCSI_LPFC_DEBUG_FS This makes debugging information from the lpfc driver available via the debugfs filesystem. -config LPFC_NVME_INITIATOR - bool "Emulex LightPulse Fibre Channel NVME Initiator Support" - depends on SCSI_LPFC && NVME_FC - ---help--- - This enables NVME Initiator support in the Emulex lpfc driver. - -config LPFC_NVME_TARGET - bool "Emulex LightPulse Fibre Channel NVME Initiator Support" - depends on SCSI_LPFC && NVME_TARGET_FC - ---help--- - This enables NVME Target support in the Emulex lpfc driver. - Target enablement must still be enabled on a per adapter - basis by module parameters. - config SCSI_SIM710 tristate "Simple 53c710 SCSI support (Compaq, NCR machines)" depends on (EISA || MCA) && SCSI diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index fbd3a563be53..84aa62f1a4de 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -3315,9 +3315,9 @@ LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST, * lpfc_enable_fc4_type: Defines what FC4 types are supported. * Supported Values: 1 - register just FCP * 3 - register both FCP and NVME - * Supported values are [1,3]. Default value is 3 + * Supported values are [1,3]. Default value is 1 */ -LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH, +LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP, LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, "Define fc4 type to register with fabric."); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 2697d49da4d7..6cc561b04211 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -5891,10 +5891,17 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* Check to see if it matches any module parameter */ for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { if (wwn == lpfc_enable_nvmet[i]) { +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "6017 NVME Target %016llx\n", wwn); phba->nvmet_support = 1; /* a match */ +#else + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6021 Can't enable NVME Target." 
+ " NVME_TARGET_FC infrastructure" + " is not in kernel\n"); +#endif } } } diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 0a4c19081409..0024de1c6c1f 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -2149,7 +2149,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) /* localport is allocated from the stack, but the registration * call allocates heap memory as well as the private area. */ -#ifdef CONFIG_LPFC_NVME_INITIATOR +#if (IS_ENABLED(CONFIG_NVME_FC)) ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, &vport->phba->pcidev->dev, &localport); #else @@ -2190,7 +2190,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) void lpfc_nvme_destroy_localport(struct lpfc_vport *vport) { -#ifdef CONFIG_LPFC_NVME_INITIATOR +#if (IS_ENABLED(CONFIG_NVME_FC)) struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL; @@ -2274,7 +2274,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport) int lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { -#ifdef CONFIG_LPFC_NVME_INITIATOR +#if (IS_ENABLED(CONFIG_NVME_FC)) int ret = 0; struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; @@ -2403,7 +2403,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) void lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { -#ifdef CONFIG_LPFC_NVME_INITIATOR +#if (IS_ENABLED(CONFIG_NVME_FC)) int ret; struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index b7739a554fe0..7ca868f394da 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -671,7 +671,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED; -#ifdef CONFIG_LPFC_NVME_TARGET +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, &phba->pcidev->dev, &phba->targetport); @@ -756,7 +756,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) { -#ifdef CONFIG_LPFC_NVME_TARGET +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_nvmet_tgtport *tgtp; if (phba->nvmet_support == 0) @@ -788,7 +788,7 @@ static void lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct hbq_dmabuf *nvmebuf) { -#ifdef CONFIG_LPFC_NVME_TARGET +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_nvmet_tgtport *tgtp; struct fc_frame_header *fc_hdr; struct lpfc_nvmet_rcv_ctx *ctxp; @@ -891,7 +891,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, struct rqb_dmabuf *nvmebuf, uint64_t isr_timestamp) { -#ifdef CONFIG_LPFC_NVME_TARGET +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_nvmet_rcv_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; struct fc_frame_header *fc_hdr; -- cgit v1.2.3 From fe8daf5fa715f7214952f06a387e4b7de818c5be Mon Sep 17 00:00:00 2001 From: Taku Izumi Date: Wed, 15 Mar 2017 13:47:50 +0900 Subject: fjes: Fix wrong netdevice feature flags This patch fixes netdev->features for Extended Socket network device. Currently Extended Socket network device's netdev->feature claims NETIF_F_HW_CSUM, however this is completely wrong. There's no feature of checksum offloading. 
That causes invalid TCP/UDP checksums and packet rejection when IP forwarding from the Extended Socket network device to another network device. NETIF_F_HW_CSUM should be omitted. Signed-off-by: Taku Izumi Signed-off-by: David S. Miller --- drivers/net/fjes/fjes_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index b75d9cdcfb0c..c4b3c4b77a9c 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1316,7 +1316,7 @@ static void fjes_netdev_setup(struct net_device *netdev) netdev->min_mtu = fjes_support_mtu[0]; netdev->max_mtu = fjes_support_mtu[3]; netdev->flags |= IFF_BROADCAST; - netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; } static void fjes_irq_watch_task(struct work_struct *work) -- cgit v1.2.3 From e83bb3e6f3efa21f4a9d883a25d0ecd9dfb431e1 Mon Sep 17 00:00:00 2001 From: Michael Engl Date: Tue, 3 Oct 2017 13:57:00 +0100 Subject: iio: adc: ti_am335x_adc: fix fifo overrun recovery The tiadc_irq_h(int irq, void *private) function handles FIFO overruns by clearing flags, then disabling and re-enabling the ADC to recover. If the ADC is running in continuous mode a FIFO overrun happens regularly. If the disabling of the ADC happens concurrently with a new conversion, it might happen that the enabling of the ADC is ignored by the hardware. This stops the ADC permanently. No more interrupts are triggered. According to the AM335x Reference Manual (SPRUH73H October 2011 - Revised April 2013 - Chapter 12.4 and 12.5) it is necessary to check the ADC FSM bits in REG_ADCFSM before enabling the ADC again, because the disabling of the ADC only completes after the current conversion has finished. To trigger this bug it is necessary to run the ADC in continuous mode. The ADC values of all channels need to be read in an endless loop. The bug appears within the first 6 hours (~5.4 million handled FIFO overruns). The user space application will hang on reading new values from the character device. Fixes: ca9a563805f7a ("iio: ti_am335x_adc: Add continuous sampling support") Signed-off-by: Michael Engl Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/adc/ti_am335x_adc.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index ad9dec30bb30..4282ceca3d8f 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -169,7 +169,9 @@ static irqreturn_t tiadc_irq_h(int irq, void *private) { struct iio_dev *indio_dev = private; struct tiadc_device *adc_dev = iio_priv(indio_dev); - unsigned int status, config; + unsigned int status, config, adc_fsm; + unsigned short count = 0; + status = tiadc_readl(adc_dev, REG_IRQSTATUS); /* @@ -183,6 +185,15 @@ static irqreturn_t tiadc_irq_h(int irq, void *private) tiadc_writel(adc_dev, REG_CTRL, config); tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES); + + /* wait for idle state. 
+ * ADC needs to finish the current conversion + * before disabling the module + */ + do { + adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM); + } while (adc_fsm != 0x10 && count++ < 100); + tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB)); return IRQ_HANDLED; } else if (status & IRQENB_FIFO1THRES) { -- cgit v1.2.3 From 0f424de1fd9bc4ab24bd1fe5430ab5618e803e31 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 14 Mar 2017 14:42:03 -0400 Subject: drm/radeon/si: add dpm quirk for Oland MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit OLAND 0x1002:0x6604 0x1028:0x066F 0x00 seems to have problems with higher sclks. Acked-by: Christian König Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/si_dpm.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index d12b8978142f..72e1588580a1 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2984,6 +2984,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, (rdev->pdev->device == 0x6667)) { max_sclk = 75000; } + } else if (rdev->family == CHIP_OLAND) { + if ((rdev->pdev->device == 0x6604) && + (rdev->pdev->subsystem_vendor == 0x1028) && + (rdev->pdev->subsystem_device == 0x066F)) { + max_sclk = 75000; + } } if (rps->vce_active) { -- cgit v1.2.3 From 18a8de1bc37e97dff1c96ee6cf49adbd02a0f775 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 14 Mar 2017 19:24:19 -0400 Subject: drm/amdgpu/si: add dpm quirk for Oland MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit OLAND 0x1002:0x6604 0x1028:0x066F 0x00 seems to have problems with higher sclks. Acked-by: Christian König Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index f55e45b52fbc..33b504bafb88 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -3464,6 +3464,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, (adev->pdev->device == 0x6667)) { max_sclk = 75000; } + } else if (adev->asic_type == CHIP_OLAND) { + if ((adev->pdev->device == 0x6604) && + (adev->pdev->subsystem_vendor == 0x1028) && + (adev->pdev->subsystem_device == 0x066F)) { + max_sclk = 75000; + } } if (rps->vce_active) { -- cgit v1.2.3 From 801a6aa9a63c90724e8899982ad8c7f16be1e2cd Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 15 Mar 2017 05:34:25 -0400 Subject: drm/amd/amdgpu: Fix debugfs reg read/write address width MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The MMIO space is wider now so we mask the lower 22 bits instead of 18. 
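A quick note on the mask change in the patch that follows: 0x3FFFF equals (1 << 18) - 1, so the old code truncated the debugfs register offset to 18 bits, while (1UL << 22) - 1 keeps 22 bits to cover the wider MMIO space. A small stand-alone illustration (plain C, not driver code):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long pos = 0x12345678;

            /* the old literal is just an 18-bit mask */
            assert(0x3FFFFUL == (1UL << 18) - 1);

            printf("18-bit mask: 0x%lx\n", pos & 0x3FFFFUL);          /* 0x5678   */
            printf("22-bit mask: 0x%lx\n", pos & ((1UL << 22) - 1));  /* 0x345678 */
            return 0;
    }
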
Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4120b351a8e5..a3a105ec99e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2590,7 +2590,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, use_bank = 0; } - *pos &= 0x3FFFF; + *pos &= (1UL << 22) - 1; if (use_bank) { if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || @@ -2666,7 +2666,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, use_bank = 0; } - *pos &= 0x3FFFF; + *pos &= (1UL << 22) - 1; if (use_bank) { if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || -- cgit v1.2.3 From 5371bbf4b295eea334ed453efa286afa2c3ccff3 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 15 Mar 2017 12:57:21 -0700 Subject: net: bcmgenet: Do not suspend PHY if Wake-on-LAN is enabled Suspending the PHY would be putting it in a low power state where it may no longer allow us to do Wake-on-LAN. Fixes: cc013fb48898 ("net: bcmgenet: correctly suspend and resume PHY device") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 69015fa50f20..365895ed3c3e 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -3481,7 +3481,8 @@ static int bcmgenet_suspend(struct device *d) bcmgenet_netif_stop(dev); - phy_suspend(priv->phydev); + if (!device_may_wakeup(d)) + phy_suspend(priv->phydev); netif_device_detach(dev); @@ -3578,7 +3579,8 @@ static int bcmgenet_resume(struct device *d) netif_device_attach(dev); - phy_resume(priv->phydev); + if (!device_may_wakeup(d)) + phy_resume(priv->phydev); if (priv->eee.eee_enabled) bcmgenet_eee_enable_set(dev, true); -- cgit v1.2.3 From 622c36f143fc9566ba49d7cec994c2da1182d9e2 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 15 Mar 2017 15:11:23 -0500 Subject: amd-xgbe: Fix jumbo MTU processing on newer hardware Newer hardware does not provide a cumulative payload length when multiple descriptors are needed to handle the data. Once the MTU increases beyond the size that can be handled by a single descriptor, the SKB does not get built properly by the driver. The driver will now calculate the size of the data buffers used by the hardware. The first buffer of the first descriptor is for packet headers or packet headers and data when the headers can't be split. Subsequent descriptors in a multi-descriptor chain will not use the first buffer. The second buffer is used by all the descriptors in the chain for payload data. Based on whether the driver is processing the first, intermediate, or last descriptor it can calculate the buffer usage and build the SKB properly. Tested and verified on both old and new hardware. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 6 +- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 20 +++--- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 102 +++++++++++++++++----------- 3 files changed, 78 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 8a280e7d66bd..86f1626816ff 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1148,8 +1148,8 @@ #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 -#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 -#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_LAST_INDEX 2 +#define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 @@ -1158,6 +1158,8 @@ #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7 +#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1 #define RX_NORMAL_DESC0_OVT_INDEX 0 #define RX_NORMAL_DESC0_OVT_WIDTH 16 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 937f37a5dcb2..24a687ce4388 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1896,10 +1896,15 @@ static int xgbe_dev_read(struct xgbe_channel *channel) /* Get the header length */ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + FIRST, 1); rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, HL); if (rdata->rx.hdr_len) pdata->ext_stats.rx_split_header_packets++; + } else { + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + FIRST, 0); } /* Get the RSS hash */ @@ -1922,19 +1927,16 @@ static int xgbe_dev_read(struct xgbe_channel *channel) } } - /* Get the packet length */ - rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); - - if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) { - /* Not all the data has been transferred for this packet */ - XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, - INCOMPLETE, 1); + /* Not all the data has been transferred for this packet */ + if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) return 0; - } /* This is the last of the data for this packet */ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, - INCOMPLETE, 0); + LAST, 1); + + /* Get the packet length */ + rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); /* Set checksum done indicator as appropriate */ if (netdev->features & NETIF_F_RXCSUM) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index ffea9859f5a7..a713abd9d03e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1971,13 +1971,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, { struct sk_buff *skb; u8 *packet; - unsigned int copy_len; skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len); if (!skb) return NULL; - /* Start with the header buffer which may contain just the header + /* Pull in the header buffer which may contain just the header * or the header plus data */ 
dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, @@ -1986,30 +1985,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, packet = page_address(rdata->rx.hdr.pa.pages) + rdata->rx.hdr.pa.pages_offset; - copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len; - copy_len = min(rdata->rx.hdr.dma_len, copy_len); - skb_copy_to_linear_data(skb, packet, copy_len); - skb_put(skb, copy_len); - - len -= copy_len; - if (len) { - /* Add the remaining data as a frag */ - dma_sync_single_range_for_cpu(pdata->dev, - rdata->rx.buf.dma_base, - rdata->rx.buf.dma_off, - rdata->rx.buf.dma_len, - DMA_FROM_DEVICE); - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - rdata->rx.buf.pa.pages, - rdata->rx.buf.pa.pages_offset, - len, rdata->rx.buf.dma_len); - rdata->rx.buf.pa.pages = NULL; - } + skb_copy_to_linear_data(skb, packet, len); + skb_put(skb, len); return skb; } +static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata, + struct xgbe_packet_data *packet) +{ + /* Always zero if not the first descriptor */ + if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) + return 0; + + /* First descriptor with split header, return header length */ + if (rdata->rx.hdr_len) + return rdata->rx.hdr_len; + + /* First descriptor but not the last descriptor and no split header, + * so the full buffer was used + */ + if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) + return rdata->rx.hdr.dma_len; + + /* First descriptor and last descriptor and no split header, so + * calculate how much of the buffer was used + */ + return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len); +} + +static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata, + struct xgbe_packet_data *packet, + unsigned int len) +{ + /* Always the full buffer if not the last descriptor */ + if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) + return rdata->rx.buf.dma_len; + + /* Last descriptor so calculate how much of the buffer was used + * for the last bit of data + */ + return rdata->rx.len - len; +} + static int xgbe_tx_poll(struct xgbe_channel *channel) { struct xgbe_prv_data *pdata = channel->pdata; @@ -2092,8 +2110,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) struct napi_struct *napi; struct sk_buff *skb; struct skb_shared_hwtstamps *hwtstamps; - unsigned int incomplete, error, context_next, context; - unsigned int len, rdesc_len, max_len; + unsigned int last, error, context_next, context; + unsigned int len, buf1_len, buf2_len, max_len; unsigned int received = 0; int packet_count = 0; @@ -2103,7 +2121,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) if (!ring) return 0; - incomplete = 0; + last = 0; context_next = 0; napi = (pdata->per_channel_irq) ? 
&channel->napi : &pdata->napi; @@ -2137,9 +2155,8 @@ read_again: received++; ring->cur++; - incomplete = XGMAC_GET_BITS(packet->attributes, - RX_PACKET_ATTRIBUTES, - INCOMPLETE); + last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + LAST); context_next = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT_NEXT); @@ -2148,7 +2165,7 @@ read_again: CONTEXT); /* Earlier error, just drain the remaining data */ - if ((incomplete || context_next) && error) + if ((!last || context_next) && error) goto read_again; if (error || packet->errors) { @@ -2160,16 +2177,22 @@ read_again: } if (!context) { - /* Length is cumulative, get this descriptor's length */ - rdesc_len = rdata->rx.len - len; - len += rdesc_len; + /* Get the data length in the descriptor buffers */ + buf1_len = xgbe_rx_buf1_len(rdata, packet); + len += buf1_len; + buf2_len = xgbe_rx_buf2_len(rdata, packet, len); + len += buf2_len; - if (rdesc_len && !skb) { + if (!skb) { skb = xgbe_create_skb(pdata, napi, rdata, - rdesc_len); - if (!skb) + buf1_len); + if (!skb) { error = 1; - } else if (rdesc_len) { + goto skip_data; + } + } + + if (buf2_len) { dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.buf.dma_base, rdata->rx.buf.dma_off, @@ -2179,13 +2202,14 @@ read_again: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rdata->rx.buf.pa.pages, rdata->rx.buf.pa.pages_offset, - rdesc_len, + buf2_len, rdata->rx.buf.dma_len); rdata->rx.buf.pa.pages = NULL; } } - if (incomplete || context_next) +skip_data: + if (!last || context_next) goto read_again; if (!skb) @@ -2243,7 +2267,7 @@ next_packet: } /* Check if we need to save state before leaving */ - if (received && (incomplete || context_next)) { + if (received && (!last || context_next)) { rdata = XGBE_GET_DESC_DATA(ring, ring->cur); rdata->state_saved = 1; rdata->state.skb = skb; -- cgit v1.2.3 From 9b4f603e7a9f4282aec451063ffbbb8bb410dcd9 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 15 Mar 2017 00:12:16 +0100 Subject: cpufreq: Fix and clean up show_cpuinfo_cur_freq() There is a missing newline in show_cpuinfo_cur_freq(), so add it, but while at it clean that function up somewhat too. Signed-off-by: Rafael J. Wysocki Acked-by: Viresh Kumar Cc: All applicable --- drivers/cpufreq/cpufreq.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 38b9fdf854a4..b8ff617d449d 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, char *buf) { unsigned int cur_freq = __cpufreq_get(policy); - if (!cur_freq) - return sprintf(buf, ""); - return sprintf(buf, "%u\n", cur_freq); + + if (cur_freq) + return sprintf(buf, "%u\n", cur_freq); + + return sprintf(buf, "\n"); } /** -- cgit v1.2.3 From a733ded50b6ea846200073e7381a302df71e13b3 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Sun, 5 Mar 2017 21:40:41 +0200 Subject: mei: fix deadlock on mei reset This patch fixes 'mei: synchronize irq before initiating a reset' The patch had introduced a deadlock between irq thread and mei_reset() as they are both holding the same device lock. ---> device_lock: mei_reset() <---- interrupt thread device_lock ---> synchornize_irq() wait on interrupt thread == (dead lock) The fix is to call synchronize_irq prior to call locked mei_reset function. 
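To make the deadlock description above concrete: the reset path took device_lock and then waited in synchronize_irq() for the interrupt thread to finish, while the interrupt thread was blocked trying to take the same device_lock, so neither side could make progress. Quiescing the interrupt before taking the lock breaks the cycle. A simplified sketch of the two orderings (hypothetical example_* names, not the actual mei code):

    #include <linux/interrupt.h>
    #include <linux/mutex.h>

    struct example_dev {                /* hypothetical, stands in for the device struct */
            unsigned int irq;
            struct mutex device_lock;
    };

    /*
     * Deadlocking order:
     *   reset path:  mutex_lock(&dev->device_lock);
     *                synchronize_irq(dev->irq);      <- waits for the irq thread
     *   irq thread:  mutex_lock(&dev->device_lock);  <- waits for the reset path
     *
     * Fixed order:
     */
    static void example_reset_work(struct example_dev *dev)
    {
            synchronize_irq(dev->irq);      /* quiesce the irq thread first */
            mutex_lock(&dev->device_lock);  /* only then take the lock it also needs */
            /* ... perform the reset ... */
            mutex_unlock(&dev->device_lock);
    }
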
Cc: #4.10+ Fixes: f302bb0de6ac (mei: synchronize irq before initiating a reset) Signed-off-by: Tomas Winkler Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/init.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index cfb1cdf176fa..13c55b8f9261 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -124,8 +124,6 @@ int mei_reset(struct mei_device *dev) mei_clear_interrupts(dev); - mei_synchronize_irq(dev); - /* we're already in reset, cancel the init timer * if the reset was called due the hbm protocol error * we need to call it before hw start @@ -304,6 +302,9 @@ static void mei_reset_work(struct work_struct *work) container_of(work, struct mei_device, reset_work); int ret; + mei_clear_interrupts(dev); + mei_synchronize_irq(dev); + mutex_lock(&dev->device_lock); ret = mei_reset(dev); @@ -328,6 +329,9 @@ void mei_stop(struct mei_device *dev) mei_cancel_work(dev); + mei_clear_interrupts(dev); + mei_synchronize_irq(dev); + mutex_lock(&dev->device_lock); dev->dev_state = MEI_DEV_POWER_DOWN; -- cgit v1.2.3 From c6240cacdb2c3cb56a21fb3ea0c105154ab87a2a Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Sun, 5 Mar 2017 21:40:42 +0200 Subject: mei: don't wait for os version message reply The driver still struggles with firmware versions that do not reply to the OS version request. It is safe not to wait for the reply. First, the driver doesn't do anything with the reply; second, the connection is closed immediately, so the packet will just be safely discarded in case it is received; and last, the driver won't get stuck if the firmware doesn't reply. Cc: #4.10+ Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/bus-fixup.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 3600c9993a98..29f2daed37e0 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -112,11 +112,9 @@ struct mkhi_msg { static int mei_osver(struct mei_cl_device *cldev) { - int ret; const size_t size = sizeof(struct mkhi_msg_hdr) + sizeof(struct mkhi_fwcaps) + sizeof(struct mei_os_ver); - size_t length = 8; char buf[size]; struct mkhi_msg *req; struct mkhi_fwcaps *fwcaps; @@ -137,15 +135,7 @@ static int mei_osver(struct mei_cl_device *cldev) os_ver = (struct mei_os_ver *)fwcaps->data; os_ver->os_type = OSTYPE_LINUX; - ret = __mei_cl_send(cldev->cl, buf, size, mode); - if (ret < 0) - return ret; - - ret = __mei_cl_recv(cldev->cl, buf, length, 0); - if (ret < 0) - return ret; - - return 0; + return __mei_cl_send(cldev->cl, buf, size, mode); } static void mei_mkhi_fix(struct mei_cl_device *cldev) @@ -160,7 +150,7 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev) return; ret = mei_osver(cldev); - if (ret) + if (ret < 0) dev_err(&cldev->dev, "OS version command failed %d\n", ret); mei_cldev_disable(cldev); -- cgit v1.2.3 From 8200f2085abe7f29a016381f3122000cc7b2a760 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Sat, 4 Mar 2017 18:13:57 -0700 Subject: vmbus: use rcu for per-cpu channel list The per-cpu channel list is now referred to in the interrupt routine. This is mostly safe since the host will not normally generate an interrupt when a channel is being deleted, but if it did there would be a use-after-free problem. To solve this, use RCU protection on the per-cpu list. 
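For reference, the RCU conversion described above follows the standard kernel pattern: writers publish and unpublish entries with list_add_tail_rcu()/list_del_rcu() and defer the free with kfree_rcu(), while the interrupt-side reader walks the list under rcu_read_lock() using list_for_each_entry_rcu(), so an entry cannot be freed while a reader might still be traversing it. A minimal sketch of that pattern (hypothetical item type, not the vmbus channel structures):

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct item {                       /* hypothetical, not the vmbus channel struct */
            u32 relid;
            struct list_head list;
            struct rcu_head rcu;
    };

    static LIST_HEAD(item_list);        /* writers serialized by a separate lock */

    static void example_add(struct item *it)
    {
            list_add_tail_rcu(&it->list, &item_list);
    }

    static void example_remove(struct item *it)
    {
            list_del_rcu(&it->list);    /* unpublish the entry... */
            kfree_rcu(it, rcu);         /* ...free it only after a grace period */
    }

    static void example_interrupt_lookup(u32 relid)
    {
            struct item *it;

            rcu_read_lock();
            list_for_each_entry_rcu(it, &item_list, list) {
                    if (it->relid != relid)
                            continue;
                    /* use 'it' here; it cannot be freed before rcu_read_unlock() */
                    break;
            }
            rcu_read_unlock();
    }
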
Fixes: 631e63a9f346 ("vmbus: change to per channel tasklet") Signed-off-by: Stephen Hemminger Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- drivers/hv/channel_mgmt.c | 7 ++++--- drivers/hv/vmbus_drv.c | 6 +++++- include/linux/hyperv.h | 7 +++++++ 3 files changed, 16 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index f33465d78a02..d2cfa3eb71a2 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -350,7 +350,8 @@ static struct vmbus_channel *alloc_channel(void) static void free_channel(struct vmbus_channel *channel) { tasklet_kill(&channel->callback_event); - kfree(channel); + + kfree_rcu(channel, rcu); } static void percpu_channel_enq(void *arg) @@ -359,14 +360,14 @@ static void percpu_channel_enq(void *arg) struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context); - list_add_tail(&channel->percpu_list, &hv_cpu->chan_list); + list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list); } static void percpu_channel_deq(void *arg) { struct vmbus_channel *channel = arg; - list_del(&channel->percpu_list); + list_del_rcu(&channel->percpu_list); } diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index da6b59ba5940..8370b9dc6037 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -939,8 +939,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu) if (relid == 0) continue; + rcu_read_lock(); + /* Find channel based on relid */ - list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) { + list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) { if (channel->offermsg.child_relid != relid) continue; @@ -956,6 +958,8 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu) tasklet_schedule(&channel->callback_event); } } + + rcu_read_unlock(); } } diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 62bbf3c1aa4a..c4c7ae91f9d1 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -845,6 +845,13 @@ struct vmbus_channel { * link up channels based on their CPU affinity. */ struct list_head percpu_list; + + /* + * Defer freeing channel until after all cpu's have + * gone through grace period. + */ + struct rcu_head rcu; + /* * For performance critical channels (storage, networking * etc,), Hyper-V has a mechanism to enhance the throughput -- cgit v1.2.3 From dad72a1d28442b03aac86836a42de2d00a1014ab Mon Sep 17 00:00:00 2001 From: Dexuan Cui Date: Sat, 4 Mar 2017 18:13:58 -0700 Subject: vmbus: remove hv_event_tasklet_disable/enable With the recent introduction of per-channel tasklet, we need to update the way we handle the 3 concurrency issues: 1. hv_process_channel_removal -> percpu_channel_deq vs. vmbus_chan_sched -> list_for_each_entry(..., percpu_list); 2. vmbus_process_offer -> percpu_channel_enq/deq vs. vmbus_chan_sched. 3. vmbus_close_internal vs. the per-channel tasklet vmbus_on_event; The first 2 issues can be handled by Stephen's recent patch "vmbus: use rcu for per-cpu channel list", and the third issue can be handled by calling tasklet_disable in vmbus_close_internal here. 
We don't need the original hv_event_tasklet_disable/enable since we now use per-channel tasklet instead of the previous per-CPU tasklet, and actually we must remove them due to the side effect now: vmbus_process_offer -> hv_event_tasklet_enable -> tasklet_schedule will start the per-channel callback prematurely, cauing NULL dereferencing (the channel may haven't been properly configured to run the callback yet). Fixes: 631e63a9f346 ("vmbus: change to per channel tasklet") Signed-off-by: Dexuan Cui Cc: "K. Y. Srinivasan" Cc: Haiyang Zhang Cc: Stephen Hemminger Tested-by: Vitaly Kuznetsov Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- drivers/hv/channel.c | 12 ++++-------- drivers/hv/channel_mgmt.c | 19 ------------------- include/linux/hyperv.h | 3 --- 3 files changed, 4 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index bd0d1988feb2..57b2958205c7 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -530,15 +530,13 @@ static int vmbus_close_internal(struct vmbus_channel *channel) int ret; /* - * vmbus_on_event(), running in the tasklet, can race + * vmbus_on_event(), running in the per-channel tasklet, can race * with vmbus_close_internal() in the case of SMP guest, e.g., when * the former is accessing channel->inbound.ring_buffer, the latter - * could be freeing the ring_buffer pages. - * - * To resolve the race, we can serialize them by disabling the - * tasklet when the latter is running here. + * could be freeing the ring_buffer pages, so here we must stop it + * first. */ - hv_event_tasklet_disable(channel); + tasklet_disable(&channel->callback_event); /* * In case a device driver's probe() fails (e.g., @@ -605,8 +603,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel) get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); out: - hv_event_tasklet_enable(channel); - return ret; } diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index d2cfa3eb71a2..bf846d078d85 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -382,19 +382,6 @@ static void vmbus_release_relid(u32 relid) true); } -void hv_event_tasklet_disable(struct vmbus_channel *channel) -{ - tasklet_disable(&channel->callback_event); -} - -void hv_event_tasklet_enable(struct vmbus_channel *channel) -{ - tasklet_enable(&channel->callback_event); - - /* In case there is any pending event */ - tasklet_schedule(&channel->callback_event); -} - void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) { unsigned long flags; @@ -403,7 +390,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) BUG_ON(!channel->rescind); BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); - hv_event_tasklet_disable(channel); if (channel->target_cpu != get_cpu()) { put_cpu(); smp_call_function_single(channel->target_cpu, @@ -412,7 +398,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) percpu_channel_deq(channel); put_cpu(); } - hv_event_tasklet_enable(channel); if (channel->primary_channel == NULL) { list_del(&channel->listentry); @@ -506,7 +491,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) init_vp_index(newchannel, dev_type); - hv_event_tasklet_disable(newchannel); if (newchannel->target_cpu != get_cpu()) { put_cpu(); smp_call_function_single(newchannel->target_cpu, @@ -516,7 +500,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) percpu_channel_enq(newchannel); put_cpu(); } - 
hv_event_tasklet_enable(newchannel); /* * This state is used to indicate a successful open @@ -566,7 +549,6 @@ err_deq_chan: list_del(&newchannel->listentry); mutex_unlock(&vmbus_connection.channel_mutex); - hv_event_tasklet_disable(newchannel); if (newchannel->target_cpu != get_cpu()) { put_cpu(); smp_call_function_single(newchannel->target_cpu, @@ -575,7 +557,6 @@ err_deq_chan: percpu_channel_deq(newchannel); put_cpu(); } - hv_event_tasklet_enable(newchannel); vmbus_release_relid(newchannel->offermsg.child_relid); diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index c4c7ae91f9d1..970771a5f739 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1437,9 +1437,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, const int *srv_version, int srv_vercnt, int *nego_fw_version, int *nego_srv_version); -void hv_event_tasklet_disable(struct vmbus_channel *channel); -void hv_event_tasklet_enable(struct vmbus_channel *channel); - void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); void vmbus_setevent(struct vmbus_channel *channel); -- cgit v1.2.3 From e9c18ae6eb2b312f16c63e34b43ea23926daa398 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sat, 4 Mar 2017 18:13:59 -0700 Subject: Drivers: hv: util: move waiting for release to hv_utils_transport itself Waiting for release_event in all three drivers introduced issues on release as on_reset() hook is not always called. E.g. if the device was never opened we will never get the completion. Move the waiting code to hvutil_transport_destroy() and make sure it is only called when the device is open. hvt->lock serialization should guarantee the absence of races. Fixes: 5a66fecbf6aa ("Drivers: hv: util: kvp: Fix a rescind processing issue") Fixes: 20951c7535b5 ("Drivers: hv: util: Fcopy: Fix a rescind processing issue") Fixes: d77044d142e9 ("Drivers: hv: util: Backup: Fix a rescind processing issue") Reported-by: Dexuan Cui Tested-by: Dexuan Cui Signed-off-by: Vitaly Kuznetsov Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- drivers/hv/hv_fcopy.c | 4 ---- drivers/hv/hv_kvp.c | 4 ---- drivers/hv/hv_snapshot.c | 4 ---- drivers/hv/hv_utils_transport.c | 12 ++++++++---- drivers/hv/hv_utils_transport.h | 1 + 5 files changed, 9 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c index 9aee6014339d..a5596a642ed0 100644 --- a/drivers/hv/hv_fcopy.c +++ b/drivers/hv/hv_fcopy.c @@ -71,7 +71,6 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data); static const char fcopy_devname[] = "vmbus/hv_fcopy"; static u8 *recv_buffer; static struct hvutil_transport *hvt; -static struct completion release_event; /* * This state maintains the version number registered by the daemon. */ @@ -331,7 +330,6 @@ static void fcopy_on_reset(void) if (cancel_delayed_work_sync(&fcopy_timeout_work)) fcopy_respond_to_host(HV_E_FAIL); - complete(&release_event); } int hv_fcopy_init(struct hv_util_service *srv) @@ -339,7 +337,6 @@ int hv_fcopy_init(struct hv_util_service *srv) recv_buffer = srv->recv_buffer; fcopy_transaction.recv_channel = srv->channel; - init_completion(&release_event); /* * When this driver loads, the user level daemon that * processes the host requests may not yet be running. 
@@ -361,5 +358,4 @@ void hv_fcopy_deinit(void) fcopy_transaction.state = HVUTIL_DEVICE_DYING; cancel_delayed_work_sync(&fcopy_timeout_work); hvutil_transport_destroy(hvt); - wait_for_completion(&release_event); } diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index de263712e247..a1adfe2cfb34 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -101,7 +101,6 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key); static const char kvp_devname[] = "vmbus/hv_kvp"; static u8 *recv_buffer; static struct hvutil_transport *hvt; -static struct completion release_event; /* * Register the kernel component with the user-level daemon. * As part of this registration, pass the LIC version number. @@ -714,7 +713,6 @@ static void kvp_on_reset(void) if (cancel_delayed_work_sync(&kvp_timeout_work)) kvp_respond_to_host(NULL, HV_E_FAIL); kvp_transaction.state = HVUTIL_DEVICE_INIT; - complete(&release_event); } int @@ -723,7 +721,6 @@ hv_kvp_init(struct hv_util_service *srv) recv_buffer = srv->recv_buffer; kvp_transaction.recv_channel = srv->channel; - init_completion(&release_event); /* * When this driver loads, the user level daemon that * processes the host requests may not yet be running. @@ -747,5 +744,4 @@ void hv_kvp_deinit(void) cancel_delayed_work_sync(&kvp_timeout_work); cancel_work_sync(&kvp_sendkey_work); hvutil_transport_destroy(hvt); - wait_for_completion(&release_event); } diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c index bcc03f0748d6..e659d1b94a57 100644 --- a/drivers/hv/hv_snapshot.c +++ b/drivers/hv/hv_snapshot.c @@ -79,7 +79,6 @@ static int dm_reg_value; static const char vss_devname[] = "vmbus/hv_vss"; static __u8 *recv_buffer; static struct hvutil_transport *hvt; -static struct completion release_event; static void vss_timeout_func(struct work_struct *dummy); static void vss_handle_request(struct work_struct *dummy); @@ -361,13 +360,11 @@ static void vss_on_reset(void) if (cancel_delayed_work_sync(&vss_timeout_work)) vss_respond_to_host(HV_E_FAIL); vss_transaction.state = HVUTIL_DEVICE_INIT; - complete(&release_event); } int hv_vss_init(struct hv_util_service *srv) { - init_completion(&release_event); if (vmbus_proto_version < VERSION_WIN8_1) { pr_warn("Integration service 'Backup (volume snapshot)'" " not supported on this host version.\n"); @@ -400,5 +397,4 @@ void hv_vss_deinit(void) cancel_delayed_work_sync(&vss_timeout_work); cancel_work_sync(&vss_handle_request_work); hvutil_transport_destroy(hvt); - wait_for_completion(&release_event); } diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c index c235a9515267..4402a71e23f7 100644 --- a/drivers/hv/hv_utils_transport.c +++ b/drivers/hv/hv_utils_transport.c @@ -182,10 +182,11 @@ static int hvt_op_release(struct inode *inode, struct file *file) * connects back. 
*/ hvt_reset(hvt); - mutex_unlock(&hvt->lock); if (mode_old == HVUTIL_TRANSPORT_DESTROY) - hvt_transport_free(hvt); + complete(&hvt->release); + + mutex_unlock(&hvt->lock); return 0; } @@ -304,6 +305,7 @@ struct hvutil_transport *hvutil_transport_init(const char *name, init_waitqueue_head(&hvt->outmsg_q); mutex_init(&hvt->lock); + init_completion(&hvt->release); spin_lock(&hvt_list_lock); list_add(&hvt->list, &hvt_list); @@ -351,6 +353,8 @@ void hvutil_transport_destroy(struct hvutil_transport *hvt) if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0) cn_del_callback(&hvt->cn_id); - if (mode_old != HVUTIL_TRANSPORT_CHARDEV) - hvt_transport_free(hvt); + if (mode_old == HVUTIL_TRANSPORT_CHARDEV) + wait_for_completion(&hvt->release); + + hvt_transport_free(hvt); } diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h index d98f5225c3e6..79afb626e166 100644 --- a/drivers/hv/hv_utils_transport.h +++ b/drivers/hv/hv_utils_transport.h @@ -41,6 +41,7 @@ struct hvutil_transport { int outmsg_len; /* its length */ wait_queue_head_t outmsg_q; /* poll/read wait queue */ struct mutex lock; /* protects struct members */ + struct completion release; /* synchronize with fd release */ }; struct hvutil_transport *hvutil_transport_init(const char *name, -- cgit v1.2.3 From 5a16dfc855127906fcd2935fb039bc8989313915 Mon Sep 17 00:00:00 2001 From: Dexuan Cui Date: Sat, 4 Mar 2017 18:14:00 -0700 Subject: Drivers: hv: util: don't forget to init host_ts.lock Without the patch, I always get a "BUG: spinlock bad magic" warning. Fixes: 3716a49a81ba ("hv_utils: implement Hyper-V PTP source") Signed-off-by: Dexuan Cui Cc: Vitaly Kuznetsov Cc: "K. Y. Srinivasan" Cc: Haiyang Zhang Cc: Stephen Hemminger Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- drivers/hv/hv_util.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index 3042eaa13062..186b10083c55 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c @@ -590,6 +590,8 @@ static int hv_timesync_init(struct hv_util_service *srv) if (!hyperv_cs) return -ENODEV; + spin_lock_init(&host_ts.lock); + INIT_WORK(&wrk.work, hv_set_host_time); /* -- cgit v1.2.3 From 9a5476020a5f06a0fc6f17097efc80275d2f03cd Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Mon, 13 Mar 2017 15:57:09 -0700 Subject: Drivers: hv: vmbus: Don't leak channel ids If we cannot allocate memory for the channel, free the relid associated with the channel. Signed-off-by: K. Y. Srinivasan Cc: Signed-off-by: Greg Kroah-Hartman --- drivers/hv/channel_mgmt.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index bf846d078d85..fbcb06352308 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -796,6 +796,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr) /* Allocate the channel object and save this offer. */ newchannel = alloc_channel(); if (!newchannel) { + vmbus_release_relid(offer->child_relid); pr_err("Unable to allocate channel object\n"); return; } -- cgit v1.2.3 From 5e030d5ce9d99a899b648413139ff65bab12b038 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Sun, 12 Mar 2017 20:00:30 -0700 Subject: Drivers: hv: vmbus: Don't leak memory when a channel is rescinded When we close a channel that has been rescinded, we will leak memory since vmbus_teardown_gpadl() returns an error. Fix this so that we can properly cleanup the memory allocated to the ring buffers. 
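The leak fixed below comes from a common request/response pattern: the teardown path waits on a completion that may be signalled either by the host's reply or by the rescind handler, and if the rescind case is reported as an error the caller skips its cleanup, so the ring-buffer memory is never freed. Conceptually (hypothetical example_* names, only a sketch of the control flow, not the vmbus code):

    #include <linux/completion.h>
    #include <linux/types.h>

    struct example_request {            /* hypothetical request bookkeeping */
            struct completion waitevent;
    };

    struct example_channel {            /* hypothetical channel state */
            bool rescind;
    };

    int example_send_teardown(struct example_channel *chan, struct example_request *req);

    static int example_teardown(struct example_channel *chan, struct example_request *req)
    {
            int ret;

            ret = example_send_teardown(chan, req);
            if (ret)
                    goto post_msg_err;

            /* completed either by the host's reply or by the rescind handler */
            wait_for_completion(&req->waitevent);

    post_msg_err:
            /*
             * A rescinded channel never gets a reply; the rescind handler woke
             * us up instead. Report success so the caller still frees its ring
             * buffers.
             */
            if (chan->rescind)
                    ret = 0;

            return ret;
    }
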
Fixes: ccb61f8a99e6 ("Drivers: hv: vmbus: Fix a rescind handling bug") Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- drivers/hv/channel.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 57b2958205c7..321b8833fa6f 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -502,12 +502,15 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle) wait_for_completion(&info->waitevent); - if (channel->rescind) { - ret = -ENODEV; - goto post_msg_err; - } - post_msg_err: + /* + * If the channel has been rescinded; + * we will be awakened by the rescind + * handler; set the error code to zero so we don't leak memory. + */ + if (channel->rescind) + ret = 0; + spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_del(&info->msglistentry); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); -- cgit v1.2.3 From 9a3fcf912ef7f5c6e18f9af6875dd13f7311f7aa Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 14 Mar 2017 09:50:35 +0200 Subject: iwlwifi: mvm: cleanup pending frames in DQA mode When a station is asleep, the fw will set it as "asleep". All queues that are used only by one station will be stopped by the fw. In pre-DQA mode this was relevant for aggregation queues. However, in DQA mode a queue is owned by one station only, so all queues will be stopped. As a result, we don't expect to get filtered frames back to mac80211 and don't have to maintain the entire pending_frames state logic, the same way as we do in aggregations. The correct behavior is to align DQA behavior with the aggregation queue behaviour pre-DQA: - Don't count pending frames. - Let mac80211 know we have frames in these queues so that it can properly handle trigger frames. When a trigger frame is received, mac80211 tells the driver to send frames from the queues using release_buffered_frames. The driver will tell the fw to let frames out even if the station is asleep. This is done by iwl_mvm_sta_modify_sleep_tx_count. 
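The adjustment described above amounts to summing the frames queued on the released TIDs, capping the count handed to the firmware, and flagging more_data when frames remain beyond the service period. A small self-contained sketch of that bookkeeping, with made-up names rather than the mvm structures:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TIDS 8

/* Given per-TID queue depths and the number of frames the service period
 * allows, work out how many frames to release and whether more remain. */
static int sleep_tx_count(const int queued[MAX_TIDS], unsigned long tids,
			  int cnt, bool *more_data)
{
	int remaining = cnt, tid;

	for (tid = 0; tid < MAX_TIDS; tid++) {
		if (!(tids & (1UL << tid)))
			continue;
		remaining -= queued[tid];
		if (remaining <= 0) {
			*more_data = true;	/* frames left beyond the budget */
			return cnt;		/* release the full budget */
		}
	}
	*more_data = false;
	return cnt - remaining;			/* fewer queued than requested */
}

int main(void)
{
	int queued[MAX_TIDS] = { 3, 0, 1, 0, 0, 0, 0, 0 };
	bool more;
	int n = sleep_tx_count(queued, 0x5 /* TIDs 0 and 2 */, 10, &more);

	printf("release %d frame(s), more_data=%d\n", n, more);
	return 0;
}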
Reported-and-tested-by: Jens Axboe Reported-by: Linus Torvalds Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 5 +-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 11 +++--- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 41 ++++++++++------------- 4 files changed, 28 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index d37b1695c64e..6927caecd48e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2319,7 +2319,7 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - /* Called when we need to transmit (a) frame(s) from agg queue */ + /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, tids, more_data, true); @@ -2338,7 +2338,8 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - if (tid_data->state != IWL_AGG_ON && + if (!iwl_mvm_is_dqa_supported(mvm) && + tid_data->state != IWL_AGG_ON && tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) continue; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index bd1dcc863d8f..b51a2853cc80 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -3135,7 +3135,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum ieee80211_frame_release_type reason, u16 cnt, u16 tids, bool more_data, - bool agg) + bool single_sta_queue) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = { @@ -3155,14 +3155,14 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); - /* If we're releasing frames from aggregation queues then check if the - * all queues combined that we're releasing frames from have + /* If we're releasing frames from aggregation or dqa queues then check + * if all the queues that we're releasing frames from, combined, have: * - more frames than the service period, in which case more_data * needs to be set * - fewer than 'cnt' frames, in which case we need to adjust the * firmware command (but do that unconditionally) */ - if (agg) { + if (single_sta_queue) { int remaining = cnt; int sleep_tx_count; @@ -3172,7 +3172,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, u16 n_queued; tid_data = &mvmsta->tid_data[tid]; - if (WARN(tid_data->state != IWL_AGG_ON && + if (WARN(!iwl_mvm_is_dqa_supported(mvm) && + tid_data->state != IWL_AGG_ON && tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA, "TID %d state is %d\n", tid, tid_data->state)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 4be34f902278..1927ce607798 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -547,7 +547,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum ieee80211_frame_release_type reason, u16 cnt, u16 tids, bool more_data, - bool 
agg); + bool single_sta_queue); int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool drain); void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index dd2b4a300819..3f37075f4cde 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -628,8 +629,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) * values. * Note that we don't need to make sure it isn't agg'd, since we're * TXing non-sta + * For DQA mode - we shouldn't increase it though */ - atomic_inc(&mvm->pending_frames[sta_id]); + if (!iwl_mvm_is_dqa_supported(mvm)) + atomic_inc(&mvm->pending_frames[sta_id]); return 0; } @@ -1005,11 +1008,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); - /* Increase pending frames count if this isn't AMPDU */ - if ((iwl_mvm_is_dqa_supported(mvm) && - mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON && - mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) || - (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)) + /* Increase pending frames count if this isn't AMPDU or DQA queue */ + if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu) atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); return 0; @@ -1079,12 +1079,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, lockdep_assert_held(&mvmsta->lock); if ((tid_data->state == IWL_AGG_ON || - tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && + tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA || + iwl_mvm_is_dqa_supported(mvm)) && iwl_mvm_tid_queued(tid_data) == 0) { /* - * Now that this aggregation queue is empty tell mac80211 so it - * knows we no longer have frames buffered for the station on - * this TID (for the TIM bitmap calculation.) + * Now that this aggregation or DQA queue is empty tell + * mac80211 so it knows we no longer have frames buffered for + * the station on this TID (for the TIM bitmap calculation.) 
*/ ieee80211_sta_set_buffered(sta, tid, false); } @@ -1257,7 +1258,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, u8 skb_freed = 0; u16 next_reclaimed, seq_ctl; bool is_ndp = false; - bool txq_agg = false; /* Is this TXQ aggregated */ __skb_queue_head_init(&skbs); @@ -1283,6 +1283,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, info->flags |= IEEE80211_TX_STAT_ACK; break; case TX_STATUS_FAIL_DEST_PS: + /* In DQA, the FW should have stopped the queue and not + * return this status + */ + WARN_ON(iwl_mvm_is_dqa_supported(mvm)); info->flags |= IEEE80211_TX_STAT_TX_FILTERED; break; default: @@ -1387,15 +1391,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, bool send_eosp_ndp = false; spin_lock_bh(&mvmsta->lock); - if (iwl_mvm_is_dqa_supported(mvm)) { - enum iwl_mvm_agg_state state; - - state = mvmsta->tid_data[tid].state; - txq_agg = (state == IWL_AGG_ON || - state == IWL_EMPTYING_HW_QUEUE_DELBA); - } else { - txq_agg = txq_id >= mvm->first_agg_queue; - } if (!is_ndp) { tid_data->next_reclaimed = next_reclaimed; @@ -1452,11 +1447,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, * If the txq is not an AMPDU queue, there is no chance we freed * several skbs. Check that out... */ - if (txq_agg) + if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) goto out; /* We can't free more than one frame at once on a shared queue */ - WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1)); + WARN_ON(skb_freed > 1); /* If we have still frames for this STA nothing to do here */ if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) -- cgit v1.2.3 From b7048ea12fbb2724ee0cd30752d4fac43cab0651 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 15 Mar 2017 16:31:58 +0200 Subject: drm/i915: Do .init_clock_gating() earlier to avoid it clobbering watermarks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently ILK-BDW explicitly disable LP1+ watermarks from their .init_clock_gating() hooks. Unfortunately that hook gets called way too late since by that time we've already initialized all the watermark state tracking which then gets out of sync with the hardware state. We may eventually want to consider killing off the explicit LP1+ disable from .init_clock_gating(). In the meantime however, we can avoid the problem by reordering the init sequence such that intel_modeset_init_hw()->intel_init_clock_gating() gets called prior to the hardware state takeover. I suppose prior to the two stage watermark programming we were magically saved by something that forced the watermarks to be reprogrammed fully after .init_clock_gating() got called. But now that no longer happens. Note that the diff might look a bit odd as it kills off one call of intel_update_cdclk(), but that's fine because intel_modeset_init_hw() does the exact same thing. Previously we just did it twice. Actually even this new init sequence is pretty bogus as .init_clock_gating() really should be called before any gem hardware init since it can configure various clock gating workarounds and whatnot that affect the GT side as well. Also intel_modeset_init() really should get split up into better defined init stages. Another "fun" detail is that intel_modeset_gem_init() is where RPS/RC6 gets configured. Why that is done from the display code is beyond me. I've decided to leave all this be for now, and just try to fix the init sequence enough for watermarks to work. 
Cc: stable@vger.kernel.org Cc: Gabriele Mazzotta Cc: David Purton Cc: Matt Roper Cc: Maarten Lankhorst Reported-by: Gabriele Mazzotta Reported-by: David Purton Tested-by: Gabriele Mazzotta Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=96645 Fixes: ed4a6a7ca853 ("drm/i915: Add two-stage ILK-style watermark programming (v11)") Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170220140443.30891-1-ville.syrjala@linux.intel.com Reviewed-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170315143158.31780-1-ville.syrjala@linux.intel.com (cherry picked from commit 5be6e33400992d3450e1c8234a5af353e1560580) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_display.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3282b0f4b134..ed1f4f272b4f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -16696,12 +16696,11 @@ int intel_modeset_init(struct drm_device *dev) } } - intel_update_czclk(dev_priv); - intel_update_cdclk(dev_priv); - dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; - intel_shared_dpll_init(dev); + intel_update_czclk(dev_priv); + intel_modeset_init_hw(dev); + if (dev_priv->max_cdclk_freq == 0) intel_update_max_cdclk(dev_priv); @@ -17258,8 +17257,6 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_init_gt_powersave(dev_priv); - intel_modeset_init_hw(dev); - intel_setup_overlay(dev_priv); } -- cgit v1.2.3 From abda288bb207e5c681306299126af8c022709c18 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Sun, 19 Feb 2017 16:33:35 -0800 Subject: auxdisplay: img-ascii-lcd: add missing sentinel entry in img_ascii_lcd_matches The OF device table must be terminated, otherwise we'll be walking past it and into areas unknown. Fixes: 0cad855fbd08 ("auxdisplay: img-ascii-lcd: driver for simple ASCII...") Cc: stable@vger.kernel.org Signed-off-by: Dmitry Torokhov Tested-by: Fengguang Wu Signed-off-by: Greg Kroah-Hartman --- drivers/auxdisplay/img-ascii-lcd.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c index bf43b5d2aafc..83f1439e57fd 100644 --- a/drivers/auxdisplay/img-ascii-lcd.c +++ b/drivers/auxdisplay/img-ascii-lcd.c @@ -218,6 +218,7 @@ static const struct of_device_id img_ascii_lcd_matches[] = { { .compatible = "img,boston-lcd", .data = &boston_config }, { .compatible = "mti,malta-lcd", .data = &malta_config }, { .compatible = "mti,sead3-lcd", .data = &sead3_config }, + { /* sentinel */ } }; /** -- cgit v1.2.3 From 4e841d3eb9294ce4137fdb5d0a88f1bceab9c212 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 10 Mar 2017 17:39:21 -0800 Subject: mwifiex: pcie: don't leak DMA buffers when removing When PCIe FLR support was added, much of the remove/release code for PCIe was migrated to ->down_dev(), but ->down_dev() is never called for device removal. Let's refactor the cleanup to be done in both cases. Also, drop the comments above mwifiex_cleanup_pcie(), because they were clearly wrong, and it's better to have clear and obvious code than to detail the code steps in comments anyway. 
Fixes: 4c5dae59d2e9 ("mwifiex: add PCIe function level reset support") Cc: Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 38 ++++++++++++++--------------- 1 file changed, 19 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index a0d918094889..b8c990d10d6e 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2739,6 +2739,21 @@ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter) schedule_work(&card->work); } +static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter) +{ + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + + if (reg->sleep_cookie) + mwifiex_pcie_delete_sleep_cookie_buf(adapter); + + mwifiex_pcie_delete_cmdrsp_buf(adapter); + mwifiex_pcie_delete_evtbd_ring(adapter); + mwifiex_pcie_delete_rxbd_ring(adapter); + mwifiex_pcie_delete_txbd_ring(adapter); + card->cmdrsp_buf = NULL; +} + /* * This function initializes the PCI-E host memory space, WCB rings, etc. * @@ -2850,13 +2865,6 @@ err_enable_dev: /* * This function cleans up the allocated card buffers. - * - * The following are freed by this function - - * - TXBD ring buffers - * - RXBD ring buffers - * - Event BD ring buffers - * - Command response ring buffer - * - Sleep cookie buffer */ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) { @@ -2875,6 +2883,8 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) "Failed to write driver not-ready signature\n"); } + mwifiex_pcie_free_buffers(adapter); + if (pdev) { pci_iounmap(pdev, card->pci_mmap); pci_iounmap(pdev, card->pci_mmap1); @@ -3126,10 +3136,7 @@ err_cre_txbd: pci_iounmap(pdev, card->pci_mmap1); } -/* This function cleans up the PCI-E host memory space. - * Some code is extracted from mwifiex_unregister_dev() - * - */ +/* This function cleans up the PCI-E host memory space. */ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; @@ -3140,14 +3147,7 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) adapter->seq_num = 0; - if (reg->sleep_cookie) - mwifiex_pcie_delete_sleep_cookie_buf(adapter); - - mwifiex_pcie_delete_cmdrsp_buf(adapter); - mwifiex_pcie_delete_evtbd_ring(adapter); - mwifiex_pcie_delete_rxbd_ring(adapter); - mwifiex_pcie_delete_txbd_ring(adapter); - card->cmdrsp_buf = NULL; + mwifiex_pcie_free_buffers(adapter); } static struct mwifiex_if_ops pcie_ops = { -- cgit v1.2.3 From ba1c7e45ec224cc8d2df33ecaee1946d48e79231 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 10 Mar 2017 17:39:22 -0800 Subject: mwifiex: set adapter->dev before starting to use mwifiex_dbg() The mwifiex_dbg() log handler utilizes the struct device in adapter->dev. Without it, it decides not to print anything. As of commit 2e02b5814217 ("mwifiex: Allow mwifiex early access to device structure"), we started assigning that pointer only after we finished mwifiex_register() -- this effectively neuters any mwifiex_dbg() logging done before this point. Let's move the device assignment into mwifiex_register(). 
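The reason early messages vanish is that the log helper bails out while the device pointer it keys on is still NULL, so the pointer has to be filled in as part of registration rather than afterwards. A trivial standalone illustration of that ordering, with invented names rather than the mwifiex API:

#include <stdio.h>
#include <stdlib.h>

struct adapter {
	const char *dev;	/* stands in for adapter->dev */
};

/* Drops the message unless the adapter is tied to a device,
 * mirroring a debug macro that needs a struct device to print. */
static void adapter_dbg(const struct adapter *a, const char *msg)
{
	if (!a->dev)
		return;		/* silently lost */
	printf("[%s] %s\n", a->dev, msg);
}

/* Assigning the device inside registration means every later
 * adapter_dbg() call, including ones made during init, is visible. */
static struct adapter *adapter_register(const char *dev)
{
	struct adapter *a = calloc(1, sizeof(*a));

	if (!a)
		return NULL;
	a->dev = dev;			/* set before any logging happens */
	adapter_dbg(a, "registered");	/* dropped if dev were assigned later */
	return a;
}

int main(void)
{
	struct adapter *a = adapter_register("pci-0000:01:00.0");

	adapter_dbg(a, "probe continues");
	free(a);
	return 0;
}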
Fixes: 2e02b5814217 ("mwifiex: Allow mwifiex early access to device structure") Cc: Rajat Jain Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 5ebca1d0cfc7..43d040e02e4d 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -57,8 +57,8 @@ MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0"); * In case of any errors during inittialization, this function also ensures * proper cleanup before exiting. */ -static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, - void **padapter) +static int mwifiex_register(void *card, struct device *dev, + struct mwifiex_if_ops *if_ops, void **padapter) { struct mwifiex_adapter *adapter; int i; @@ -68,6 +68,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, return -ENOMEM; *padapter = adapter; + adapter->dev = dev; adapter->card = card; /* Save interface specific operations in adapter */ @@ -1568,12 +1569,11 @@ mwifiex_add_card(void *card, struct completion *fw_done, { struct mwifiex_adapter *adapter; - if (mwifiex_register(card, if_ops, (void **)&adapter)) { + if (mwifiex_register(card, dev, if_ops, (void **)&adapter)) { pr_err("%s: software init failed\n", __func__); goto err_init_sw; } - adapter->dev = dev; mwifiex_probe_of(adapter); adapter->iface_type = iface_type; -- cgit v1.2.3 From 36908c4e5b1063eff3e11336fab544a76c625b69 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 10 Mar 2017 17:39:23 -0800 Subject: mwifiex: uninit wakeup info when removing device We manually init wakeup info, but we don't detach it on device removal. This means that if we (for example) rmmod + modprobe the driver, the device framework might return -EEXIST the second time, and we'll complain in the logs: [ 839.311881] mwifiex_pcie 0000:01:00.0: fail to init wakeup for mwifiex AFAICT, there's no other negative effect. But we can fix this by disabling wakeup on remove, similar to what a few other drivers do (e.g., the power supply framework). This code (and bug) has existed on SDIO for a while, but it got moved around and enabled for PCIe with commit 853402a00823 ("mwifiex: Enable WoWLAN for both sdio and pcie"). Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 43d040e02e4d..b62e03d11c2e 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1718,6 +1718,9 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter) wiphy_unregister(adapter->wiphy); wiphy_free(adapter->wiphy); + if (adapter->irq_wakeup >= 0) + device_init_wakeup(adapter->dev, false); + /* Unregister device */ mwifiex_dbg(adapter, INFO, "info: unregister device\n"); -- cgit v1.2.3 From 03270c6ac6207fc55bbf9d20d195029dca210c79 Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Mon, 6 Mar 2017 23:23:42 +0000 Subject: parport: fix attempt to write duplicate procfiles Usually every parallel port will have a single pardev registered with it. But ppdev driver is an exception. This userspace parallel port driver allows to create multiple parrallel port devices for a single parallel port. 
And as a result we were having a nice warning like: "sysctl table check failed: /dev/parport/parport0/devices/ppdev0/timeslice Sysctl already exists" Use the same logic as used in parport_register_device() and register the proc files only once for each parallel port. Fixes: 6fa45a226897 ("parport: add device-model to parport subsystem") Cc: stable # v4.4+ Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1414656 Bugzilla: https://bugs.archlinux.org/task/52322 Tested-by: James Feeney Signed-off-by: Sudip Mukherjee Signed-off-by: Greg Kroah-Hartman --- drivers/parport/share.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/parport/share.c b/drivers/parport/share.c index bc090daa850a..5dc53d420ca8 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -939,8 +939,10 @@ parport_register_dev_model(struct parport *port, const char *name, * pardevice fields. -arca */ port->ops->init_state(par_dev, par_dev->state); - port->proc_device = par_dev; - parport_device_proc_register(par_dev); + if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) { + port->proc_device = par_dev; + parport_device_proc_register(par_dev); + } return par_dev; -- cgit v1.2.3 From 9a69645dde1188723d80745c1bc6ee9af2cbe2a7 Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Mon, 6 Mar 2017 23:23:43 +0000 Subject: ppdev: fix registering same device name Usually every parallel port will have a single pardev registered with it. But ppdev driver is an exception. This userspace parallel port driver allows to create multiple parrallel port devices for a single parallel port. And as a result we were having a big warning like: "sysfs: cannot create duplicate filename '/devices/parport0/ppdev0.0'". And with that many parallel port printers stopped working. We have been using the minor number as the id field while registering a parralel port device with a parralel port. But when there are multiple parrallel port device for one single parallel port, they all tried to register with the same name like 'pardev0.0' and everything started failing. Use an incremented index as the id instead of the minor number. Fixes: 8b7d3a9d903e ("ppdev: use new parport device model") Cc: stable # v4.9+ Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1414656 Bugzilla: https://bugs.archlinux.org/task/52322 Tested-by: James Feeney Signed-off-by: Sudip Mukherjee Signed-off-by: Greg Kroah-Hartman --- drivers/char/ppdev.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index 2a558c706581..3e73bcdf9e65 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -84,11 +84,14 @@ struct pp_struct { struct ieee1284_info state; struct ieee1284_info saved_state; long default_inactivity; + int index; }; /* should we use PARDEVICE_MAX here? 
*/ static struct device *devices[PARPORT_MAX]; +static DEFINE_IDA(ida_index); + /* pp_struct.flags bitfields */ #define PP_CLAIMED (1<<0) #define PP_EXCL (1<<1) @@ -290,7 +293,7 @@ static int register_device(int minor, struct pp_struct *pp) struct pardevice *pdev = NULL; char *name; struct pardev_cb ppdev_cb; - int rc = 0; + int rc = 0, index; name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); if (name == NULL) @@ -303,20 +306,23 @@ static int register_device(int minor, struct pp_struct *pp) goto err; } + index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); memset(&ppdev_cb, 0, sizeof(ppdev_cb)); ppdev_cb.irq_func = pp_irq; ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; ppdev_cb.private = pp; - pdev = parport_register_dev_model(port, name, &ppdev_cb, minor); + pdev = parport_register_dev_model(port, name, &ppdev_cb, index); parport_put_port(port); if (!pdev) { pr_warn("%s: failed to register device!\n", name); rc = -ENXIO; + ida_simple_remove(&ida_index, index); goto err; } pp->pdev = pdev; + pp->index = index; dev_dbg(&pdev->dev, "registered pardevice\n"); err: kfree(name); @@ -755,6 +761,7 @@ static int pp_release(struct inode *inode, struct file *file) if (pp->pdev) { parport_unregister_device(pp->pdev); + ida_simple_remove(&ida_index, pp->index); pp->pdev = NULL; pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); } -- cgit v1.2.3 From c3423563c68fc454b805b46cb69fd4816db933e2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 3 Mar 2017 07:58:06 -0700 Subject: vmw_vmci: handle the return value from pci_alloc_irq_vectors correctly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It returns the number of vectors allocated when successful, so check for a negative error only. Fixes: 3bb434cd ("vmw_vmci: switch to pci_irq_alloc_vectors") Signed-off-by: Christoph Hellwig Reported-by: Loïc Yhuel Signed-off-by: Greg Kroah-Hartman --- drivers/misc/vmw_vmci/vmci_guest.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index 9d659542a335..dad5abee656e 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -566,10 +566,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, */ error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS, PCI_IRQ_MSIX); - if (error) { + if (error < 0) { error = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY); - if (error) + if (error < 0) goto err_remove_bitmap; } else { vmci_dev->exclusive_vectors = true; -- cgit v1.2.3 From 7c468447f40645fbf2a033dfdaa92b1957130d50 Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Fri, 10 Mar 2017 12:28:18 -0600 Subject: crypto: ccp - Assign DMA commands to the channel's CCP The CCP driver generally uses a round-robin approach when assigning operations to available CCPs. For the DMA engine, however, the DMA mappings of the SGs are associated with a specific CCP. When an IOMMU is enabled, the IOMMU is programmed based on this specific device. If the DMA operations are not performed by that specific CCP then addressing errors and I/O page faults will occur. Update the CCP driver to allow a specific CCP device to be requested for an operation and use this in the DMA engine support. 
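The routing rule described above, honour an explicit device stored in the command and otherwise fall back to round-robin, can be sketched in a few lines of plain C with hypothetical types rather than the real ccp structures:

#include <stdio.h>

#define NUM_DEVS 2

struct dev {
	int id;
};

struct cmd {
	struct dev *dev;	/* set when the DMA mapping pins a device */
	const char *name;
};

static struct dev devs[NUM_DEVS] = { { 0 }, { 1 } };

/* Round-robin pick for submitters that do not care which device runs it. */
static struct dev *next_dev(void)
{
	static int n;

	return &devs[n++ % NUM_DEVS];
}

/* Commands whose buffers were mapped against a specific device must run
 * there, otherwise the IOMMU translations would not match; anything else
 * is balanced across the available devices. */
static void enqueue(struct cmd *c)
{
	struct dev *d = c->dev ? c->dev : next_dev();

	printf("%s -> device %d\n", c->name, d->id);
}

int main(void)
{
	struct cmd pinned = { .dev = &devs[1], .name = "dma-mapped" };
	struct cmd any1 = { .name = "plain-1" };
	struct cmd any2 = { .name = "plain-2" };

	enqueue(&any1);
	enqueue(&pinned);
	enqueue(&any2);
	return 0;
}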
Cc: # 4.9.x- Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev.c | 5 ++++- drivers/crypto/ccp/ccp-dmaengine.c | 1 + include/linux/ccp.h | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 511ab042b5e7..92d1c6959f08 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -283,11 +283,14 @@ EXPORT_SYMBOL_GPL(ccp_version); */ int ccp_enqueue_cmd(struct ccp_cmd *cmd) { - struct ccp_device *ccp = ccp_get_device(); + struct ccp_device *ccp; unsigned long flags; unsigned int i; int ret; + /* Some commands might need to be sent to a specific device */ + ccp = cmd->ccp ? cmd->ccp : ccp_get_device(); + if (!ccp) return -ENODEV; diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index e5d9278f4019..8d0eeb46d4a2 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -390,6 +390,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan, goto err; ccp_cmd = &cmd->ccp_cmd; + ccp_cmd->ccp = chan->ccp; ccp_pt = &ccp_cmd->u.passthru_nomap; ccp_cmd->flags = CCP_CMD_MAY_BACKLOG; ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP; diff --git a/include/linux/ccp.h b/include/linux/ccp.h index c71dd8fa5764..c41b8d99dd0e 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -556,7 +556,7 @@ enum ccp_engine { * struct ccp_cmd - CCP operation request * @entry: list element (ccp driver use only) * @work: work element used for callbacks (ccp driver use only) - * @ccp: CCP device to be run on (ccp driver use only) + * @ccp: CCP device to be run on * @ret: operation return code (ccp driver use only) * @flags: cmd processing flags * @engine: CCP operation to perform -- cgit v1.2.3 From 69db7009318758769d625b023402161c750f7876 Mon Sep 17 00:00:00 2001 From: Prarit Bhargava Date: Tue, 14 Mar 2017 07:36:01 -0400 Subject: hwrng: amd - Revert managed API changes After commit 31b2a73c9c5f ("hwrng: amd - Migrate to managed API"), the amd-rng driver uses devres with pci_dev->dev to keep track of resources, but does not actually register a PCI driver. This results in the following issues: 1. The message WARNING: CPU: 2 PID: 621 at drivers/base/dd.c:349 driver_probe_device+0x38c is output when the i2c_amd756 driver loads and attempts to register a PCI driver. The PCI & device subsystems assume that no resources have been registered for the device, and the WARN_ON() triggers since amd-rng has already do so. 2. The driver leaks memory because the driver does not attach to a device. The driver only uses the PCI device as a reference. devm_*() functions will release resources on driver detach, which the amd-rng driver will never do. As a result, 3. The driver cannot be reloaded because there is always a use of the ioport and region after the first load of the driver. Revert the changes made by 31b2a73c9c5f ("hwrng: amd - Migrate to managed API"). Cc: Signed-off-by: Prarit Bhargava Fixes: 31b2a73c9c5f ("hwrng: amd - Migrate to managed API"). 
Cc: Matt Mackall Cc: Corentin LABBE Cc: PrasannaKumar Muralidharan Cc: Wei Yongjun Cc: linux-crypto@vger.kernel.org Cc: linux-geode@lists.infradead.org Signed-off-by: Herbert Xu --- drivers/char/hw_random/amd-rng.c | 42 ++++++++++++++++++++++++++++++++-------- 1 file changed, 34 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 4a99ac756f08..9959c762da2f 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -55,6 +55,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl); struct amd768_priv { void __iomem *iobase; struct pci_dev *pcidev; + u32 pmbase; }; static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) @@ -148,33 +149,58 @@ found: if (pmbase == 0) return -EIO; - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET, - PMBASE_SIZE, DRV_NAME)) { + if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) { dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", pmbase + 0xF0); - return -EBUSY; + err = -EBUSY; + goto out; } - priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET, - PMBASE_SIZE); + priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE); if (!priv->iobase) { pr_err(DRV_NAME "Cannot map ioport\n"); - return -ENOMEM; + err = -EINVAL; + goto err_iomap; } amd_rng.priv = (unsigned long)priv; + priv->pmbase = pmbase; priv->pcidev = pdev; pr_info(DRV_NAME " detected\n"); - return devm_hwrng_register(&pdev->dev, &amd_rng); + err = hwrng_register(&amd_rng); + if (err) { + pr_err(DRV_NAME " registering failed (%d)\n", err); + goto err_hwrng; + } + return 0; + +err_hwrng: + ioport_unmap(priv->iobase); +err_iomap: + release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE); +out: + kfree(priv); + return err; } static void __exit mod_exit(void) { + struct amd768_priv *priv; + + priv = (struct amd768_priv *)amd_rng.priv; + + hwrng_unregister(&amd_rng); + + ioport_unmap(priv->iobase); + + release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE); + + kfree(priv); } module_init(mod_init); -- cgit v1.2.3 From 8c75704ebcac2ffa31ee7bcc359baf701b52bf00 Mon Sep 17 00:00:00 2001 From: Prarit Bhargava Date: Tue, 14 Mar 2017 07:36:02 -0400 Subject: hwrng: geode - Revert managed API changes After commit e9afc746299d ("hwrng: geode - Use linux/io.h instead of asm/io.h") the geode-rng driver uses devres with pci_dev->dev to keep track of resources, but does not actually register a PCI driver. This results in the following issues: 1. The driver leaks memory because the driver does not attach to a device. The driver only uses the PCI device as a reference. devm_*() functions will release resources on driver detach, which the geode-rng driver will never do. As a result, 2. The driver cannot be reloaded because there is always a use of the ioport and region after the first load of the driver. Revert the changes made by e9afc746299d ("hwrng: geode - Use linux/io.h instead of asm/io.h"). 
Cc: Signed-off-by: Prarit Bhargava Fixes: 6e9b5e76882c ("hwrng: geode - Migrate to managed API") Cc: Matt Mackall Cc: Corentin LABBE Cc: PrasannaKumar Muralidharan Cc: Wei Yongjun Cc: linux-crypto@vger.kernel.org Cc: linux-geode@lists.infradead.org Signed-off-by: Herbert Xu --- drivers/char/hw_random/geode-rng.c | 50 ++++++++++++++++++++++++++------------ 1 file changed, 35 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index e7a245942029..e1d421a36a13 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c @@ -31,6 +31,9 @@ #include #include + +#define PFX KBUILD_MODNAME ": " + #define GEODE_RNG_DATA_REG 0x50 #define GEODE_RNG_STATUS_REG 0x54 @@ -82,6 +85,7 @@ static struct hwrng geode_rng = { static int __init mod_init(void) { + int err = -ENODEV; struct pci_dev *pdev = NULL; const struct pci_device_id *ent; void __iomem *mem; @@ -89,27 +93,43 @@ static int __init mod_init(void) for_each_pci_dev(pdev) { ent = pci_match_id(pci_tbl, pdev); - if (ent) { - rng_base = pci_resource_start(pdev, 0); - if (rng_base == 0) - return -ENODEV; - - mem = devm_ioremap(&pdev->dev, rng_base, 0x58); - if (!mem) - return -ENOMEM; - geode_rng.priv = (unsigned long)mem; - - pr_info("AMD Geode RNG detected\n"); - return devm_hwrng_register(&pdev->dev, &geode_rng); - } + if (ent) + goto found; } - /* Device not found. */ - return -ENODEV; + goto out; + +found: + rng_base = pci_resource_start(pdev, 0); + if (rng_base == 0) + goto out; + err = -ENOMEM; + mem = ioremap(rng_base, 0x58); + if (!mem) + goto out; + geode_rng.priv = (unsigned long)mem; + + pr_info("AMD Geode RNG detected\n"); + err = hwrng_register(&geode_rng); + if (err) { + pr_err(PFX "RNG registering failed (%d)\n", + err); + goto err_unmap; + } +out: + return err; + +err_unmap: + iounmap(mem); + goto out; } static void __exit mod_exit(void) { + void __iomem *mem = (void __iomem *)geode_rng.priv; + + hwrng_unregister(&geode_rng); + iounmap(mem); } module_init(mod_init); -- cgit v1.2.3 From 5c71ad17f97e84d6d7e11a8e193d5d96890ed2ed Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Thu, 9 Mar 2017 01:45:39 +0800 Subject: EDAC, pnd2_edac: Add new EDAC driver for Intel SoC platforms Initial target for this driver is the Intel Apollo Lake platform and Denverton micro-server, they use the same internal memory controller IP called Pondicherry2. Memory controller registers are not in PCI config space like earlier Intel memory controllers. For Apollo Lake platform they are accessed via a "side-band" interface, for Denverton micro-server they are access via PCI config space and memory map I/O. This driver is for Apollo Lake and Denverton, but only the Denverton is fully enabled while we wait for the sideband driver. Apollo lake driver and initial cut at Denverton driver by Tony Luck. Extensive cleanup, refactoring and basic verification by Qiuxu Zhuo. 
Signed-off-by: Tony Luck Signed-off-by: Qiuxu Zhuo Cc: linux-edac Link: http://lkml.kernel.org/r/20170308174539.14432-1-qiuxu.zhuo@intel.com Signed-off-by: Borislav Petkov --- MAINTAINERS | 6 + drivers/edac/Kconfig | 9 + drivers/edac/Makefile | 1 + drivers/edac/pnd2_edac.c | 1542 ++++++++++++++++++++++++++++++++++++++++++++++ drivers/edac/pnd2_edac.h | 301 +++++++++ 5 files changed, 1859 insertions(+) create mode 100644 drivers/edac/pnd2_edac.c create mode 100644 drivers/edac/pnd2_edac.h (limited to 'drivers') diff --git a/MAINTAINERS b/MAINTAINERS index c265a5fe4848..572fe4252ac4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4776,6 +4776,12 @@ L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/mpc85xx_edac.[ch] +EDAC-PND2 +M: Tony Luck +L: linux-edac@vger.kernel.org +S: Maintained +F: drivers/edac/pnd2_edac.[ch] + EDAC-PASEMI M: Egor Martovetsky L: linux-edac@vger.kernel.org diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 82d85cce81f8..be3eac6ad54d 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -259,6 +259,15 @@ config EDAC_SKX Support for error detection and correction the Intel Skylake server Integrated Memory Controllers. +config EDAC_PND2 + tristate "Intel Pondicherry2" + depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL + help + Support for error detection and correction on the Intel + Pondicherry2 Integrated Memory Controller. This SoC IP is + first used on the Apollo Lake platform and Denverton + micro-server but may appear on others in the future. + config EDAC_MPC85XX tristate "Freescale MPC83xx / MPC85xx" depends on EDAC_MM_EDAC && FSL_SOC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 88e472e8b9a9..587107e90996 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_EDAC_I7300) += i7300_edac.o obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o obj-$(CONFIG_EDAC_SKX) += skx_edac.o +obj-$(CONFIG_EDAC_PND2) += pnd2_edac.o obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o obj-$(CONFIG_EDAC_E752X) += e752x_edac.o obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c new file mode 100644 index 000000000000..14d39f05226e --- /dev/null +++ b/drivers/edac/pnd2_edac.c @@ -0,0 +1,1542 @@ +/* + * Driver for Pondicherry2 memory controller. + * + * Copyright (c) 2016, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * [Derived from sb_edac.c] + * + * Translation of system physical addresses to DIMM addresses + * is a two stage process: + * + * First the Pondicherry 2 memory controller handles slice and channel interleaving + * in "sys2pmi()". This is (almost) completley common between platforms. + * + * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM, + * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "edac_mc.h" +#include "edac_module.h" +#include "pnd2_edac.h" + +#define APL_NUM_CHANNELS 4 +#define DNV_NUM_CHANNELS 2 +#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */ + +enum type { + APL, + DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */ +}; + +struct dram_addr { + int chan; + int dimm; + int rank; + int bank; + int row; + int col; +}; + +struct pnd2_pvt { + int dimm_geom[APL_NUM_CHANNELS]; + u64 tolm, tohm; +}; + +/* + * System address space is divided into multiple regions with + * different interleave rules in each. The as0/as1 regions + * have no interleaving at all. The as2 region is interleaved + * between two channels. The mot region is magic and may overlap + * other regions, with its interleave rules taking precedence. + * Addresses not in any of these regions are interleaved across + * all four channels. + */ +static struct region { + u64 base; + u64 limit; + u8 enabled; +} mot, as0, as1, as2; + +static struct dunit_ops { + char *name; + enum type type; + int pmiaddr_shift; + int pmiidx_shift; + int channels; + int dimms_per_channel; + int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name); + int (*get_registers)(void); + int (*check_ecc)(void); + void (*mk_region)(char *name, struct region *rp, void *asym); + void (*get_dimm_config)(struct mem_ctl_info *mci); + int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx, + struct dram_addr *daddr, char *msg); +} *ops; + +static struct mem_ctl_info *pnd2_mci; + +#define PND2_MSG_SIZE 256 + +/* Debug macros */ +#define pnd2_printk(level, fmt, arg...) \ + edac_printk(level, "pnd2", fmt, ##arg) + +#define pnd2_mc_printk(mci, level, fmt, arg...) 
\ + edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg) + +#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12 +#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13 +#define SELECTOR_DISABLED (-1) +#define _4GB (1ul << 32) + +#define PMI_ADDRESS_WIDTH 31 +#define PND_MAX_PHYS_BIT 39 + +#define APL_ASYMSHIFT 28 +#define DNV_ASYMSHIFT 31 +#define CH_HASH_MASK_LSB 6 +#define SLICE_HASH_MASK_LSB 6 +#define MOT_SLC_INTLV_BIT 12 +#define LOG2_PMI_ADDR_GRANULARITY 5 +#define MOT_SHIFT 24 + +#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo)) +#define U64_LSHIFT(val, s) ((u64)(val) << (s)) + +#ifdef CONFIG_X86_INTEL_SBI_APL +#include "linux/platform_data/sbi_apl.h" +int sbi_send(int port, int off, int op, u32 *data) +{ + struct sbi_apl_message sbi_arg; + int ret, read = 0; + + memset(&sbi_arg, 0, sizeof(sbi_arg)); + + if (op == 0 || op == 4 || op == 6) + read = 1; + else + sbi_arg.data = *data; + + sbi_arg.opcode = op; + sbi_arg.port_address = port; + sbi_arg.register_offset = off; + ret = sbi_apl_commit(&sbi_arg); + if (ret || sbi_arg.status) + edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n", + sbi_arg.status, ret, sbi_arg.data); + + if (ret == 0) + ret = sbi_arg.status; + + if (ret == 0 && read) + *data = sbi_arg.data; + + return ret; +} +#else +int sbi_send(int port, int off, int op, u32 *data) +{ + return -EUNATCH; +} +#endif + +static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name) +{ + int ret = 0; + + edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op); + switch (sz) { + case 8: + ret = sbi_send(port, off + 4, op, (u32 *)(data + 4)); + case 4: + ret = sbi_send(port, off, op, (u32 *)data); + pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name, + sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret); + break; + } + + return ret; +} + +static u64 get_mem_ctrl_hub_base_addr(void) +{ + struct b_cr_mchbar_lo_pci lo; + struct b_cr_mchbar_hi_pci hi; + struct pci_dev *pdev; + + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL); + if (pdev) { + pci_read_config_dword(pdev, 0x48, (u32 *)&lo); + pci_read_config_dword(pdev, 0x4c, (u32 *)&hi); + pci_dev_put(pdev); + } else { + return 0; + } + + if (!lo.enable) { + edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n"); + return 0; + } + + return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15); +} + +static u64 get_sideband_reg_base_addr(void) +{ + struct pci_dev *pdev; + u32 hi, lo; + + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL); + if (pdev) { + pci_read_config_dword(pdev, 0x10, &lo); + pci_read_config_dword(pdev, 0x14, &hi); + pci_dev_put(pdev); + return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0)); + } else { + return 0xfd000000; + } +} + +static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name) +{ + struct pci_dev *pdev; + char *base; + u64 addr; + + if (op == 4) { + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL); + if (!pdev) + return -ENODEV; + + pci_read_config_dword(pdev, off, data); + pci_dev_put(pdev); + } else { + /* MMIO via memory controller hub base address */ + if (op == 0 && port == 0x4c) { + addr = get_mem_ctrl_hub_base_addr(); + if (!addr) + return -ENODEV; + } else { + /* MMIO via sideband register base address */ + addr = get_sideband_reg_base_addr(); + if (!addr) + return -ENODEV; + addr += (port << 16); + } + + base = ioremap((resource_size_t)addr, 0x10000); + if (!base) + return -ENODEV; + + if (sz == 8) + *(u32 *)(data + 4) = *(u32 *)(base + off + 4); + *(u32 *)data = *(u32 *)(base + off); + + iounmap(base); + } + + 
edac_dbg(2, "Read %s=%.8x_%.8x\n", name, + (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data); + + return 0; +} + +#define RD_REGP(regp, regname, port) \ + ops->rd_reg(port, \ + regname##_offset, \ + regname##_r_opcode, \ + regp, sizeof(struct regname), \ + #regname) + +#define RD_REG(regp, regname) \ + ops->rd_reg(regname ## _port, \ + regname##_offset, \ + regname##_r_opcode, \ + regp, sizeof(struct regname), \ + #regname) + +static u64 top_lm, top_hm; +static bool two_slices; +static bool two_channels; /* Both PMI channels in one slice enabled */ + +static u8 sym_chan_mask; +static u8 asym_chan_mask; +static u8 chan_mask; + +static int slice_selector = -1; +static int chan_selector = -1; +static u64 slice_hash_mask; +static u64 chan_hash_mask; + +static void mk_region(char *name, struct region *rp, u64 base, u64 limit) +{ + rp->enabled = 1; + rp->base = base; + rp->limit = limit; + edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit); +} + +static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask) +{ + if (mask == 0) { + pr_info(FW_BUG "MOT mask cannot be zero\n"); + return; + } + if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) { + pr_info(FW_BUG "MOT mask not power of two\n"); + return; + } + if (base & ~mask) { + pr_info(FW_BUG "MOT region base/mask alignment error\n"); + return; + } + rp->base = base; + rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0); + rp->enabled = 1; + edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit); +} + +static bool in_region(struct region *rp, u64 addr) +{ + if (!rp->enabled) + return false; + + return rp->base <= addr && addr <= rp->limit; +} + +static int gen_sym_mask(struct b_cr_slice_channel_hash *p) +{ + int mask = 0; + + if (!p->slice_0_mem_disabled) + mask |= p->sym_slice0_channel_enabled; + + if (!p->slice_1_disabled) + mask |= p->sym_slice1_channel_enabled << 2; + + if (p->ch_1_disabled || p->enable_pmi_dual_data_mode) + mask &= 0x5; + + return mask; +} + +static int gen_asym_mask(struct b_cr_slice_channel_hash *p, + struct b_cr_asym_mem_region0_mchbar *as0, + struct b_cr_asym_mem_region1_mchbar *as1, + struct b_cr_asym_2way_mem_region_mchbar *as2way) +{ + const int intlv[] = { 0x5, 0xA, 0x3, 0xC }; + int mask = 0; + + if (as2way->asym_2way_interleave_enable) + mask = intlv[as2way->asym_2way_intlv_mode]; + if (as0->slice0_asym_enable) + mask |= (1 << as0->slice0_asym_channel_select); + if (as1->slice1_asym_enable) + mask |= (4 << as1->slice1_asym_channel_select); + if (p->slice_0_mem_disabled) + mask &= 0xc; + if (p->slice_1_disabled) + mask &= 0x3; + if (p->ch_1_disabled || p->enable_pmi_dual_data_mode) + mask &= 0x5; + + return mask; +} + +static struct b_cr_tolud_pci tolud; +static struct b_cr_touud_lo_pci touud_lo; +static struct b_cr_touud_hi_pci touud_hi; +static struct b_cr_asym_mem_region0_mchbar asym0; +static struct b_cr_asym_mem_region1_mchbar asym1; +static struct b_cr_asym_2way_mem_region_mchbar asym_2way; +static struct b_cr_mot_out_base_mchbar mot_base; +static struct b_cr_mot_out_mask_mchbar mot_mask; +static struct b_cr_slice_channel_hash chash; + +/* Apollo Lake dunit */ +/* + * Validated on board with just two DIMMs in the [0] and [2] positions + * in this array. Other port number matches documentation, but caution + * advised. 
+ */ +static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 }; +static struct d_cr_drp0 drp0[APL_NUM_CHANNELS]; + +/* Denverton dunit */ +static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 }; +static struct d_cr_dsch dsch; +static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS]; +static struct d_cr_drp drp[DNV_NUM_CHANNELS]; +static struct d_cr_dmap dmap[DNV_NUM_CHANNELS]; +static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS]; +static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS]; +static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS]; +static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS]; +static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS]; + +static void apl_mk_region(char *name, struct region *rp, void *asym) +{ + struct b_cr_asym_mem_region0_mchbar *a = asym; + + mk_region(name, rp, + U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT), + U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) + + GENMASK_ULL(APL_ASYMSHIFT - 1, 0)); +} + +static void dnv_mk_region(char *name, struct region *rp, void *asym) +{ + struct b_cr_asym_mem_region_denverton *a = asym; + + mk_region(name, rp, + U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT), + U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) + + GENMASK_ULL(DNV_ASYMSHIFT - 1, 0)); +} + +static int apl_get_registers(void) +{ + int i; + + if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar)) + return -ENODEV; + + for (i = 0; i < APL_NUM_CHANNELS; i++) + if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i])) + return -ENODEV; + + return 0; +} + +static int dnv_get_registers(void) +{ + int i; + + if (RD_REG(&dsch, d_cr_dsch)) + return -ENODEV; + + for (i = 0; i < DNV_NUM_CHANNELS; i++) + if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) || + RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) || + RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) || + RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) || + RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) || + RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) || + RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) || + RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i])) + return -ENODEV; + + return 0; +} + +/* + * Read all the h/w config registers once here (they don't + * change at run time. Figure out which address ranges have + * which interleave characteristics. 
+ */ +static int get_registers(void) +{ + const int intlv[] = { 10, 11, 12, 12 }; + + if (RD_REG(&tolud, b_cr_tolud_pci) || + RD_REG(&touud_lo, b_cr_touud_lo_pci) || + RD_REG(&touud_hi, b_cr_touud_hi_pci) || + RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) || + RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) || + RD_REG(&mot_base, b_cr_mot_out_base_mchbar) || + RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) || + RD_REG(&chash, b_cr_slice_channel_hash)) + return -ENODEV; + + if (ops->get_registers()) + return -ENODEV; + + if (ops->type == DNV) { + /* PMI channel idx (always 0) for asymmetric region */ + asym0.slice0_asym_channel_select = 0; + asym1.slice1_asym_channel_select = 0; + /* PMI channel bitmap (always 1) for symmetric region */ + chash.sym_slice0_channel_enabled = 0x1; + chash.sym_slice1_channel_enabled = 0x1; + } + + if (asym0.slice0_asym_enable) + ops->mk_region("as0", &as0, &asym0); + + if (asym1.slice1_asym_enable) + ops->mk_region("as1", &as1, &asym1); + + if (asym_2way.asym_2way_interleave_enable) { + mk_region("as2way", &as2, + U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT), + U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) + + GENMASK_ULL(APL_ASYMSHIFT - 1, 0)); + } + + if (mot_base.imr_en) { + mk_region_mask("mot", &mot, + U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT), + U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT)); + } + + top_lm = U64_LSHIFT(tolud.tolud, 20); + top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20); + + two_slices = !chash.slice_1_disabled && + !chash.slice_0_mem_disabled && + (chash.sym_slice0_channel_enabled != 0) && + (chash.sym_slice1_channel_enabled != 0); + two_channels = !chash.ch_1_disabled && + !chash.enable_pmi_dual_data_mode && + ((chash.sym_slice0_channel_enabled == 3) || + (chash.sym_slice1_channel_enabled == 3)); + + sym_chan_mask = gen_sym_mask(&chash); + asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way); + chan_mask = sym_chan_mask | asym_chan_mask; + + if (two_slices && !two_channels) { + if (chash.hvm_mode) + slice_selector = 29; + else + slice_selector = intlv[chash.interleave_mode]; + } else if (!two_slices && two_channels) { + if (chash.hvm_mode) + chan_selector = 29; + else + chan_selector = intlv[chash.interleave_mode]; + } else if (two_slices && two_channels) { + if (chash.hvm_mode) { + slice_selector = 29; + chan_selector = 30; + } else { + slice_selector = intlv[chash.interleave_mode]; + chan_selector = intlv[chash.interleave_mode] + 1; + } + } + + if (two_slices) { + if (!chash.hvm_mode) + slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB; + if (!two_channels) + slice_hash_mask |= BIT_ULL(slice_selector); + } + + if (two_channels) { + if (!chash.hvm_mode) + chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB; + if (!two_slices) + chan_hash_mask |= BIT_ULL(chan_selector); + } + + return 0; +} + +/* Get a contiguous memory address (remove the MMIO gap) */ +static u64 remove_mmio_gap(u64 sys) +{ + return (sys < _4GB) ? 
sys : sys - (_4GB - top_lm); +} + +/* Squeeze out one address bit, shift upper part down to fill gap */ +static void remove_addr_bit(u64 *addr, int bitidx) +{ + u64 mask; + + if (bitidx == -1) + return; + + mask = (1ull << bitidx) - 1; + *addr = ((*addr >> 1) & ~mask) | (*addr & mask); +} + +/* XOR all the bits from addr specified in mask */ +static int hash_by_mask(u64 addr, u64 mask) +{ + u64 result = addr & mask; + + result = (result >> 32) ^ result; + result = (result >> 16) ^ result; + result = (result >> 8) ^ result; + result = (result >> 4) ^ result; + result = (result >> 2) ^ result; + result = (result >> 1) ^ result; + + return (int)result & 1; +} + +/* + * First stage decode. Take the system address and figure out which + * second stage will deal with it based on interleave modes. + */ +static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg) +{ + u64 contig_addr, contig_base, contig_offset, contig_base_adj; + int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH : + MOT_CHAN_INTLV_BIT_1SLC_2CH; + int slice_intlv_bit_rm = SELECTOR_DISABLED; + int chan_intlv_bit_rm = SELECTOR_DISABLED; + /* Determine if address is in the MOT region. */ + bool mot_hit = in_region(&mot, addr); + /* Calculate the number of symmetric regions enabled. */ + int sym_channels = hweight8(sym_chan_mask); + + /* + * The amount we need to shift the asym base can be determined by the + * number of enabled symmetric channels. + * NOTE: This can only work because symmetric memory is not supposed + * to do a 3-way interleave. + */ + int sym_chan_shift = sym_channels >> 1; + + /* Give up if address is out of range, or in MMIO gap */ + if (addr >= (1ul << PND_MAX_PHYS_BIT) || + (addr >= top_lm && addr < _4GB) || addr >= top_hm) { + snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr); + return -EINVAL; + } + + /* Get a contiguous memory address (remove the MMIO gap) */ + contig_addr = remove_mmio_gap(addr); + + if (in_region(&as0, addr)) { + *pmiidx = asym0.slice0_asym_channel_select; + + contig_base = remove_mmio_gap(as0.base); + contig_offset = contig_addr - contig_base; + contig_base_adj = (contig_base >> sym_chan_shift) * + ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1); + contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull); + } else if (in_region(&as1, addr)) { + *pmiidx = 2u + asym1.slice1_asym_channel_select; + + contig_base = remove_mmio_gap(as1.base); + contig_offset = contig_addr - contig_base; + contig_base_adj = (contig_base >> sym_chan_shift) * + ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1); + contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull); + } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) { + bool channel1; + + mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH; + *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1; + channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) : + hash_by_mask(contig_addr, chan_hash_mask); + *pmiidx |= (u32)channel1; + + contig_base = remove_mmio_gap(as2.base); + chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector; + contig_offset = contig_addr - contig_base; + remove_addr_bit(&contig_offset, chan_intlv_bit_rm); + contig_addr = (contig_base >> sym_chan_shift) + contig_offset; + } else { + /* Otherwise we're in normal, boring symmetric mode. 
*/ + *pmiidx = 0u; + + if (two_slices) { + bool slice1; + + if (mot_hit) { + slice_intlv_bit_rm = MOT_SLC_INTLV_BIT; + slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1; + } else { + slice_intlv_bit_rm = slice_selector; + slice1 = hash_by_mask(addr, slice_hash_mask); + } + + *pmiidx = (u32)slice1 << 1; + } + + if (two_channels) { + bool channel1; + + mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH : + MOT_CHAN_INTLV_BIT_1SLC_2CH; + + if (mot_hit) { + chan_intlv_bit_rm = mot_intlv_bit; + channel1 = (addr >> mot_intlv_bit) & 1; + } else { + chan_intlv_bit_rm = chan_selector; + channel1 = hash_by_mask(contig_addr, chan_hash_mask); + } + + *pmiidx |= (u32)channel1; + } + } + + /* Remove the chan_selector bit first */ + remove_addr_bit(&contig_addr, chan_intlv_bit_rm); + /* Remove the slice bit (we remove it second because it must be lower */ + remove_addr_bit(&contig_addr, slice_intlv_bit_rm); + *pmiaddr = contig_addr; + + return 0; +} + +/* Translate PMI address to memory (rank, row, bank, column) */ +#define C(n) (0x10 | (n)) /* column */ +#define B(n) (0x20 | (n)) /* bank */ +#define R(n) (0x40 | (n)) /* row */ +#define RS (0x80) /* rank */ + +/* addrdec values */ +#define AMAP_1KB 0 +#define AMAP_2KB 1 +#define AMAP_4KB 2 +#define AMAP_RSVD 3 + +/* dden values */ +#define DEN_4Gb 0 +#define DEN_8Gb 2 + +/* dwid values */ +#define X8 0 +#define X16 1 + +static struct dimm_geometry { + u8 addrdec; + u8 dden; + u8 dwid; + u8 rowbits, colbits; + u16 bits[PMI_ADDRESS_WIDTH]; +} dimms[] = { + { + .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16, + .rowbits = 15, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), + R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), + R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14), + 0, 0, 0, 0 + } + }, + { + .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8, + .rowbits = 16, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), + R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), + R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14), + R(15), 0, 0, 0 + } + }, + { + .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16, + .rowbits = 16, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), + R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), + R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14), + R(15), 0, 0, 0 + } + }, + { + .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8, + .rowbits = 16, .colbits = 11, + .bits = { + C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), + R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), + R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13), + R(14), R(15), 0, 0 + } + }, + { + .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16, + .rowbits = 15, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), + R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), + R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14), + 0, 0, 0, 0 + } + }, + { + .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8, + .rowbits = 16, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), + R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), + R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14), + R(15), 0, 0, 0 + } + }, + { + .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16, + .rowbits = 16, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), + R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), + R(9), R(10), C(8), C(9), R(11), RS, R(12), 
R(13), R(14), + R(15), 0, 0, 0 + } + }, + { + .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8, + .rowbits = 16, .colbits = 11, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), + R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), + R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13), + R(14), R(15), 0, 0 + } + }, + { + .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16, + .rowbits = 15, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), + B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), + R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14), + 0, 0, 0, 0 + } + }, + { + .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8, + .rowbits = 16, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), + B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), + R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14), + R(15), 0, 0, 0 + } + }, + { + .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16, + .rowbits = 16, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), + B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), + R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14), + R(15), 0, 0, 0 + } + }, + { + .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8, + .rowbits = 16, .colbits = 11, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), + B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), + R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13), + R(14), R(15), 0, 0 + } + } +}; + +static int bank_hash(u64 pmiaddr, int idx, int shft) +{ + int bhash = 0; + + switch (idx) { + case 0: + bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1; + break; + case 1: + bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1; + bhash ^= ((pmiaddr >> 22) & 1) << 1; + break; + case 2: + bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2; + break; + } + + return bhash; +} + +static int rank_hash(u64 pmiaddr) +{ + return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1; +} + +/* Second stage decode. Compute rank, bank, row & column. */ +static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx, + struct dram_addr *daddr, char *msg) +{ + struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx]; + struct pnd2_pvt *pvt = mci->pvt_info; + int g = pvt->dimm_geom[pmiidx]; + struct dimm_geometry *d = &dimms[g]; + int column = 0, bank = 0, row = 0, rank = 0; + int i, idx, type, skiprs = 0; + + for (i = 0; i < PMI_ADDRESS_WIDTH; i++) { + int bit = (pmiaddr >> i) & 1; + + if (i + skiprs >= PMI_ADDRESS_WIDTH) { + snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n"); + return -EINVAL; + } + + type = d->bits[i + skiprs] & ~0xf; + idx = d->bits[i + skiprs] & 0xf; + + /* + * On single rank DIMMs ignore the rank select bit + * and shift remainder of "bits[]" down one place. 
+ */ + if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) { + skiprs = 1; + type = d->bits[i + skiprs] & ~0xf; + idx = d->bits[i + skiprs] & 0xf; + } + + switch (type) { + case C(0): + column |= (bit << idx); + break; + case B(0): + bank |= (bit << idx); + if (cr_drp0->bahen) + bank ^= bank_hash(pmiaddr, idx, d->addrdec); + break; + case R(0): + row |= (bit << idx); + break; + case RS: + rank = bit; + if (cr_drp0->rsien) + rank ^= rank_hash(pmiaddr); + break; + default: + if (bit) { + snprintf(msg, PND2_MSG_SIZE, "Bad translation\n"); + return -EINVAL; + } + goto done; + } + } + +done: + daddr->col = column; + daddr->bank = bank; + daddr->row = row; + daddr->rank = rank; + daddr->dimm = 0; + + return 0; +} + +/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */ +#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out)) + +static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx, + struct dram_addr *daddr, char *msg) +{ + /* Rank 0 or 1 */ + daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0); + /* Rank 2 or 3 */ + daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1); + + /* + * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we + * flip them if DIMM1 is larger than DIMM0. + */ + daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip; + + daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0); + daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1); + daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2); + if (dsch.ddr4en) + daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3); + if (dmap1[pmiidx].bxor) { + if (dsch.ddr4en) { + daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0); + daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1); + if (dsch.chan_width == 0) + /* 64/72 bit dram channel width */ + daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2); + else + /* 32/40 bit dram channel width */ + daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2); + daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3); + } else { + daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0); + daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1); + if (dsch.chan_width == 0) + daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2); + else + daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2); + } + } + + daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0); + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1); + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2); + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3); + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4); + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5); + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6); + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7); + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8); + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9); + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10); + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11); + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12); + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13); + if (dmap4[pmiidx].row14 != 31) + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14); + if (dmap4[pmiidx].row15 != 31) + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15); + if 
(dmap4[pmiidx].row16 != 31) + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16); + if (dmap4[pmiidx].row17 != 31) + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17); + + daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3); + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4); + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5); + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6); + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7); + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8); + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9); + if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f) + daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11); + + return 0; +} + +static int check_channel(int ch) +{ + if (drp0[ch].dramtype != 0) { + pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch); + return 1; + } else if (drp0[ch].eccen == 0) { + pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch); + return 1; + } + return 0; +} + +static int apl_check_ecc_active(void) +{ + int i, ret = 0; + + /* Check dramtype and ECC mode for each present DIMM */ + for (i = 0; i < APL_NUM_CHANNELS; i++) + if (chan_mask & BIT(i)) + ret += check_channel(i); + return ret ? -EINVAL : 0; +} + +#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3) + +static int check_unit(int ch) +{ + struct d_cr_drp *d = &drp[ch]; + + if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) { + pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch); + return 1; + } + return 0; +} + +static int dnv_check_ecc_active(void) +{ + int i, ret = 0; + + for (i = 0; i < DNV_NUM_CHANNELS; i++) + ret += check_unit(i); + return ret ? -EINVAL : 0; +} + +static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr, + struct dram_addr *daddr, char *msg) +{ + u64 pmiaddr; + u32 pmiidx; + int ret; + + ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg); + if (ret) + return ret; + + pmiaddr >>= ops->pmiaddr_shift; + /* pmi channel idx to dimm channel idx */ + pmiidx >>= ops->pmiidx_shift; + daddr->chan = pmiidx; + + ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg); + if (ret) + return ret; + + edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n", + addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col); + + return 0; +} + +static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m, + struct dram_addr *daddr) +{ + enum hw_event_mc_err_type tp_event; + char *optype, msg[PND2_MSG_SIZE]; + bool ripv = m->mcgstatus & MCG_STATUS_RIPV; + bool overflow = m->status & MCI_STATUS_OVER; + bool uc_err = m->status & MCI_STATUS_UC; + bool recov = m->status & MCI_STATUS_S; + u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); + u32 mscod = GET_BITFIELD(m->status, 16, 31); + u32 errcode = GET_BITFIELD(m->status, 0, 15); + u32 optypenum = GET_BITFIELD(m->status, 4, 6); + int rc; + + tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) : + HW_EVENT_ERR_CORRECTED; + + /* + * According with Table 15-9 of the Intel Architecture spec vol 3A, + * memory errors should fit in this mask: + * 000f 0000 1mmm cccc (binary) + * where: + * f = Correction Report Filtering Bit. 
If 1, subsequent errors + * won't be shown + * mmm = error type + * cccc = channel + * If the mask doesn't match, report an error to the parsing logic + */ + if (!((errcode & 0xef80) == 0x80)) { + optype = "Can't parse: it is not a mem"; + } else { + switch (optypenum) { + case 0: + optype = "generic undef request error"; + break; + case 1: + optype = "memory read error"; + break; + case 2: + optype = "memory write error"; + break; + case 3: + optype = "addr/cmd error"; + break; + case 4: + optype = "memory scrubbing error"; + break; + default: + optype = "reserved"; + break; + } + } + + /* Only decode errors with an valid address (ADDRV) */ + if (!(m->status & MCI_STATUS_ADDRV)) + return; + + rc = get_memory_error_data(mci, m->addr, daddr, msg); + if (rc) + goto address_error; + + snprintf(msg, sizeof(msg), + "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d", + overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod, + errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col); + + edac_dbg(0, "%s\n", msg); + + /* Call the helper to output message */ + edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT, + m->addr & ~PAGE_MASK, 0, daddr->chan, 0, -1, optype, msg); + + return; + +address_error: + edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, ""); +} + +static void apl_get_dimm_config(struct mem_ctl_info *mci) +{ + struct pnd2_pvt *pvt = mci->pvt_info; + struct dimm_info *dimm; + struct d_cr_drp0 *d; + u64 capacity; + int i, g; + + for (i = 0; i < APL_NUM_CHANNELS; i++) { + if (!(chan_mask & BIT(i))) + continue; + + dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0); + if (!dimm) { + edac_dbg(0, "No allocated DIMM for channel %d\n", i); + continue; + } + + d = &drp0[i]; + for (g = 0; g < ARRAY_SIZE(dimms); g++) + if (dimms[g].addrdec == d->addrdec && + dimms[g].dden == d->dden && + dimms[g].dwid == d->dwid) + break; + + if (g == ARRAY_SIZE(dimms)) { + edac_dbg(0, "Channel %d: unrecognized DIMM\n", i); + continue; + } + + pvt->dimm_geom[i] = g; + capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) * + (1ul << dimms[g].colbits); + edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3)); + dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3)); + dimm->grain = 32; + dimm->dtype = (d->dwid == 0) ? 
DEV_X8 : DEV_X16; + dimm->mtype = MEM_DDR3; + dimm->edac_mode = EDAC_SECDED; + snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2); + } +} + +static const int dnv_dtypes[] = { + DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN +}; + +static void dnv_get_dimm_config(struct mem_ctl_info *mci) +{ + int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype; + struct dimm_info *dimm; + struct d_cr_drp *d; + u64 capacity; + + if (dsch.ddr4en) { + memtype = MEM_DDR4; + banks = 16; + colbits = 10; + } else { + memtype = MEM_DDR3; + banks = 8; + } + + for (i = 0; i < DNV_NUM_CHANNELS; i++) { + if (dmap4[i].row14 == 31) + rowbits = 14; + else if (dmap4[i].row15 == 31) + rowbits = 15; + else if (dmap4[i].row16 == 31) + rowbits = 16; + else if (dmap4[i].row17 == 31) + rowbits = 17; + else + rowbits = 18; + + if (memtype == MEM_DDR3) { + if (dmap1[i].ca11 != 0x3f) + colbits = 12; + else + colbits = 10; + } + + d = &drp[i]; + /* DIMM0 is present if rank0 and/or rank1 is enabled */ + ranks_of_dimm[0] = d->rken0 + d->rken1; + /* DIMM1 is present if rank2 and/or rank3 is enabled */ + ranks_of_dimm[1] = d->rken2 + d->rken3; + + for (j = 0; j < DNV_MAX_DIMMS; j++) { + if (!ranks_of_dimm[j]) + continue; + + dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0); + if (!dimm) { + edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j); + continue; + } + + capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits); + edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3)); + dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3)); + dimm->grain = 32; + dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1]; + dimm->mtype = memtype; + dimm->edac_mode = EDAC_SECDED; + snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j); + } + } +} + +static int pnd2_register_mci(struct mem_ctl_info **ppmci) +{ + struct edac_mc_layer layers[2]; + struct mem_ctl_info *mci; + struct pnd2_pvt *pvt; + int rc; + + rc = ops->check_ecc(); + if (rc < 0) + return rc; + + /* Allocate a new MC control structure */ + layers[0].type = EDAC_MC_LAYER_CHANNEL; + layers[0].size = ops->channels; + layers[0].is_virt_csrow = false; + layers[1].type = EDAC_MC_LAYER_SLOT; + layers[1].size = ops->dimms_per_channel; + layers[1].is_virt_csrow = true; + mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); + if (!mci) + return -ENOMEM; + + pvt = mci->pvt_info; + memset(pvt, 0, sizeof(*pvt)); + + mci->mod_name = "pnd2_edac.c"; + mci->dev_name = ops->name; + mci->ctl_name = "Pondicherry2"; + + /* Get dimm basic config and the memory layout */ + ops->get_dimm_config(mci); + + if (edac_mc_add_mc(mci)) { + edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); + edac_mc_free(mci); + return -EINVAL; + } + + *ppmci = mci; + + return 0; +} + +static void pnd2_unregister_mci(struct mem_ctl_info *mci) +{ + if (unlikely(!mci || !mci->pvt_info)) { + pnd2_printk(KERN_ERR, "Couldn't find mci handler\n"); + return; + } + + /* Remove MC sysfs nodes */ + edac_mc_del_mc(NULL); + edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); + edac_mc_free(mci); +} + +/* + * Callback function registered with core kernel mce code. + * Called once for each logged error. 
+ */ +static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data) +{ + struct mce *mce = (struct mce *)data; + struct mem_ctl_info *mci; + struct dram_addr daddr; + char *type; + + if (get_edac_report_status() == EDAC_REPORTING_DISABLED) + return NOTIFY_DONE; + + mci = pnd2_mci; + if (!mci) + return NOTIFY_DONE; + + /* + * Just let mcelog handle it if the error is + * outside the memory controller. A memory error + * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0. + * bit 12 has an special meaning. + */ + if ((mce->status & 0xefff) >> 7 != 1) + return NOTIFY_DONE; + + if (mce->mcgstatus & MCG_STATUS_MCIP) + type = "Exception"; + else + type = "Event"; + + pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n"); + pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n", + mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status); + pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc); + pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr); + pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc); + pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n", + mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid); + + pnd2_mce_output_error(mci, mce, &daddr); + + /* Advice mcelog that the error were handled */ + return NOTIFY_STOP; +} + +static struct notifier_block pnd2_mce_dec = { + .notifier_call = pnd2_mce_check_error, +}; + +#ifdef CONFIG_EDAC_DEBUG +/* + * Write an address to this file to exercise the address decode + * logic in this driver. + */ +static u64 pnd2_fake_addr; +#define PND2_BLOB_SIZE 1024 +static char pnd2_result[PND2_BLOB_SIZE]; +static struct dentry *pnd2_test; +static struct debugfs_blob_wrapper pnd2_blob = { + .data = pnd2_result, + .size = 0 +}; + +static int debugfs_u64_set(void *data, u64 val) +{ + struct dram_addr daddr; + struct mce m; + + *(u64 *)data = val; + m.mcgstatus = 0; + /* ADDRV + MemRd + Unknown channel */ + m.status = MCI_STATUS_ADDRV + 0x9f; + m.addr = val; + pnd2_mce_output_error(pnd2_mci, &m, &daddr); + snprintf(pnd2_blob.data, PND2_BLOB_SIZE, + "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n", + m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col); + pnd2_blob.size = strlen(pnd2_blob.data); + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); + +static void setup_pnd2_debug(void) +{ + pnd2_test = edac_debugfs_create_dir("pnd2_test"); + edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test, + &pnd2_fake_addr, &fops_u64_wo); + debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob); +} + +static void teardown_pnd2_debug(void) +{ + debugfs_remove_recursive(pnd2_test); +} +#endif + +static int pnd2_probe(void) +{ + int rc; + + edac_dbg(2, "\n"); + rc = get_registers(); + if (rc) + return rc; + + return pnd2_register_mci(&pnd2_mci); +} + +static void pnd2_remove(void) +{ + edac_dbg(0, "\n"); + pnd2_unregister_mci(pnd2_mci); +} + +static struct dunit_ops apl_ops = { + .name = "pnd2/apl", + .type = APL, + .pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY, + .pmiidx_shift = 0, + .channels = APL_NUM_CHANNELS, + .dimms_per_channel = 1, + .rd_reg = apl_rd_reg, + .get_registers = apl_get_registers, + .check_ecc = apl_check_ecc_active, + .mk_region = apl_mk_region, + .get_dimm_config = apl_get_dimm_config, + .pmi2mem = apl_pmi2mem, +}; + +static struct dunit_ops dnv_ops = { + .name = "pnd2/dnv", + .type = DNV, + .pmiaddr_shift = 0, + .pmiidx_shift = 1, + .channels 
= DNV_NUM_CHANNELS, + .dimms_per_channel = 2, + .rd_reg = dnv_rd_reg, + .get_registers = dnv_get_registers, + .check_ecc = dnv_check_ecc_active, + .mk_region = dnv_mk_region, + .get_dimm_config = dnv_get_dimm_config, + .pmi2mem = dnv_pmi2mem, +}; + +static const struct x86_cpu_id pnd2_cpuids[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops }, + { } +}; +MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids); + +static int __init pnd2_init(void) +{ + const struct x86_cpu_id *id; + int rc; + + edac_dbg(2, "\n"); + + id = x86_match_cpu(pnd2_cpuids); + if (!id) + return -ENODEV; + + ops = (struct dunit_ops *)id->driver_data; + + /* Ensure that the OPSTATE is set correctly for POLL or NMI */ + opstate_init(); + + rc = pnd2_probe(); + if (rc < 0) { + pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc); + return rc; + } + + if (!pnd2_mci) + return -ENODEV; + + mce_register_decode_chain(&pnd2_mce_dec); + setup_pnd2_debug(); + + return 0; +} + +static void __exit pnd2_exit(void) +{ + edac_dbg(2, "\n"); + teardown_pnd2_debug(); + mce_unregister_decode_chain(&pnd2_mce_dec); + pnd2_remove(); +} + +module_init(pnd2_init); +module_exit(pnd2_exit); + +module_param(edac_op_state, int, 0444); +MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Tony Luck"); +MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller"); diff --git a/drivers/edac/pnd2_edac.h b/drivers/edac/pnd2_edac.h new file mode 100644 index 000000000000..61b6e79492bb --- /dev/null +++ b/drivers/edac/pnd2_edac.h @@ -0,0 +1,301 @@ +/* + * Register bitfield descriptions for Pondicherry2 memory controller. + * + * Copyright (c) 2016, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef _PND2_REGS_H +#define _PND2_REGS_H + +struct b_cr_touud_lo_pci { + u32 lock : 1; + u32 reserved_1 : 19; + u32 touud : 12; +}; + +#define b_cr_touud_lo_pci_port 0x4c +#define b_cr_touud_lo_pci_offset 0xa8 +#define b_cr_touud_lo_pci_r_opcode 0x04 + +struct b_cr_touud_hi_pci { + u32 touud : 7; + u32 reserved_0 : 25; +}; + +#define b_cr_touud_hi_pci_port 0x4c +#define b_cr_touud_hi_pci_offset 0xac +#define b_cr_touud_hi_pci_r_opcode 0x04 + +struct b_cr_tolud_pci { + u32 lock : 1; + u32 reserved_0 : 19; + u32 tolud : 12; +}; + +#define b_cr_tolud_pci_port 0x4c +#define b_cr_tolud_pci_offset 0xbc +#define b_cr_tolud_pci_r_opcode 0x04 + +struct b_cr_mchbar_lo_pci { + u32 enable : 1; + u32 pad_3_1 : 3; + u32 pad_14_4: 11; + u32 base: 17; +}; + +struct b_cr_mchbar_hi_pci { + u32 base : 7; + u32 pad_31_7 : 25; +}; + +/* Symmetric region */ +struct b_cr_slice_channel_hash { + u64 slice_1_disabled : 1; + u64 hvm_mode : 1; + u64 interleave_mode : 2; + u64 slice_0_mem_disabled : 1; + u64 reserved_0 : 1; + u64 slice_hash_mask : 14; + u64 reserved_1 : 11; + u64 enable_pmi_dual_data_mode : 1; + u64 ch_1_disabled : 1; + u64 reserved_2 : 1; + u64 sym_slice0_channel_enabled : 2; + u64 sym_slice1_channel_enabled : 2; + u64 ch_hash_mask : 14; + u64 reserved_3 : 11; + u64 lock : 1; +}; + +#define b_cr_slice_channel_hash_port 0x4c +#define b_cr_slice_channel_hash_offset 0x4c58 +#define b_cr_slice_channel_hash_r_opcode 0x06 + +struct b_cr_mot_out_base_mchbar { + u32 reserved_0 : 14; + u32 mot_out_base : 15; + u32 reserved_1 : 1; + u32 tr_en : 1; + u32 imr_en : 1; +}; + +#define b_cr_mot_out_base_mchbar_port 0x4c +#define b_cr_mot_out_base_mchbar_offset 0x6af0 +#define b_cr_mot_out_base_mchbar_r_opcode 0x00 + +struct b_cr_mot_out_mask_mchbar { + u32 reserved_0 : 14; + u32 mot_out_mask : 15; + u32 reserved_1 : 1; + u32 ia_iwb_en : 1; + u32 gt_iwb_en : 1; +}; + +#define b_cr_mot_out_mask_mchbar_port 0x4c +#define b_cr_mot_out_mask_mchbar_offset 0x6af4 +#define b_cr_mot_out_mask_mchbar_r_opcode 0x00 + +struct b_cr_asym_mem_region0_mchbar { + u32 pad : 4; + u32 slice0_asym_base : 11; + u32 pad_18_15 : 4; + u32 slice0_asym_limit : 11; + u32 slice0_asym_channel_select : 1; + u32 slice0_asym_enable : 1; +}; + +#define b_cr_asym_mem_region0_mchbar_port 0x4c +#define b_cr_asym_mem_region0_mchbar_offset 0x6e40 +#define b_cr_asym_mem_region0_mchbar_r_opcode 0x00 + +struct b_cr_asym_mem_region1_mchbar { + u32 pad : 4; + u32 slice1_asym_base : 11; + u32 pad_18_15 : 4; + u32 slice1_asym_limit : 11; + u32 slice1_asym_channel_select : 1; + u32 slice1_asym_enable : 1; +}; + +#define b_cr_asym_mem_region1_mchbar_port 0x4c +#define b_cr_asym_mem_region1_mchbar_offset 0x6e44 +#define b_cr_asym_mem_region1_mchbar_r_opcode 0x00 + +/* Some bit fields moved in above two structs on Denverton */ +struct b_cr_asym_mem_region_denverton { + u32 pad : 4; + u32 slice_asym_base : 8; + u32 pad_19_12 : 8; + u32 slice_asym_limit : 8; + u32 pad_28_30 : 3; + u32 slice_asym_enable : 1; +}; + +struct b_cr_asym_2way_mem_region_mchbar { + u32 pad : 2; + u32 asym_2way_intlv_mode : 2; + u32 asym_2way_base : 11; + u32 pad_16_15 : 2; + u32 asym_2way_limit : 11; + u32 pad_30_28 : 3; + u32 asym_2way_interleave_enable : 1; +}; + +#define b_cr_asym_2way_mem_region_mchbar_port 0x4c +#define b_cr_asym_2way_mem_region_mchbar_offset 0x6e50 +#define b_cr_asym_2way_mem_region_mchbar_r_opcode 0x00 + +/* Apollo Lake d-unit */ + +struct d_cr_drp0 { + u32 rken0 : 1; + u32 rken1 : 1; + u32 ddmen : 1; + u32 rsvd3 : 1; + u32 dwid : 2; + u32 dden : 3; + u32 
rsvd13_9 : 5; + u32 rsien : 1; + u32 bahen : 1; + u32 rsvd18_16 : 3; + u32 caswizzle : 2; + u32 eccen : 1; + u32 dramtype : 3; + u32 blmode : 3; + u32 addrdec : 2; + u32 dramdevice_pr : 2; +}; + +#define d_cr_drp0_offset 0x1400 +#define d_cr_drp0_r_opcode 0x00 + +/* Denverton d-unit */ + +struct d_cr_dsch { + u32 ch0en : 1; + u32 ch1en : 1; + u32 ddr4en : 1; + u32 coldwake : 1; + u32 newbypdis : 1; + u32 chan_width : 1; + u32 rsvd6_6 : 1; + u32 ooodis : 1; + u32 rsvd18_8 : 11; + u32 ic : 1; + u32 rsvd31_20 : 12; +}; + +#define d_cr_dsch_port 0x16 +#define d_cr_dsch_offset 0x0 +#define d_cr_dsch_r_opcode 0x0 + +struct d_cr_ecc_ctrl { + u32 eccen : 1; + u32 rsvd31_1 : 31; +}; + +#define d_cr_ecc_ctrl_offset 0x180 +#define d_cr_ecc_ctrl_r_opcode 0x0 + +struct d_cr_drp { + u32 rken0 : 1; + u32 rken1 : 1; + u32 rken2 : 1; + u32 rken3 : 1; + u32 dimmdwid0 : 2; + u32 dimmdden0 : 2; + u32 dimmdwid1 : 2; + u32 dimmdden1 : 2; + u32 rsvd15_12 : 4; + u32 dimmflip : 1; + u32 rsvd31_17 : 15; +}; + +#define d_cr_drp_offset 0x158 +#define d_cr_drp_r_opcode 0x0 + +struct d_cr_dmap { + u32 ba0 : 5; + u32 ba1 : 5; + u32 bg0 : 5; /* if ddr3, ba2 = bg0 */ + u32 bg1 : 5; /* if ddr3, ba3 = bg1 */ + u32 rs0 : 5; + u32 rs1 : 5; + u32 rsvd : 2; +}; + +#define d_cr_dmap_offset 0x174 +#define d_cr_dmap_r_opcode 0x0 + +struct d_cr_dmap1 { + u32 ca11 : 6; + u32 bxor : 1; + u32 rsvd : 25; +}; + +#define d_cr_dmap1_offset 0xb4 +#define d_cr_dmap1_r_opcode 0x0 + +struct d_cr_dmap2 { + u32 row0 : 5; + u32 row1 : 5; + u32 row2 : 5; + u32 row3 : 5; + u32 row4 : 5; + u32 row5 : 5; + u32 rsvd : 2; +}; + +#define d_cr_dmap2_offset 0x148 +#define d_cr_dmap2_r_opcode 0x0 + +struct d_cr_dmap3 { + u32 row6 : 5; + u32 row7 : 5; + u32 row8 : 5; + u32 row9 : 5; + u32 row10 : 5; + u32 row11 : 5; + u32 rsvd : 2; +}; + +#define d_cr_dmap3_offset 0x14c +#define d_cr_dmap3_r_opcode 0x0 + +struct d_cr_dmap4 { + u32 row12 : 5; + u32 row13 : 5; + u32 row14 : 5; + u32 row15 : 5; + u32 row16 : 5; + u32 row17 : 5; + u32 rsvd : 2; +}; + +#define d_cr_dmap4_offset 0x150 +#define d_cr_dmap4_r_opcode 0x0 + +struct d_cr_dmap5 { + u32 ca3 : 4; + u32 ca4 : 4; + u32 ca5 : 4; + u32 ca6 : 4; + u32 ca7 : 4; + u32 ca8 : 4; + u32 ca9 : 4; + u32 rsvd : 4; +}; + +#define d_cr_dmap5_offset 0x154 +#define d_cr_dmap5_r_opcode 0x0 + +#endif /* _PND2_REGS_H */ -- cgit v1.2.3 From 40ceda09c8c84694c2ca6b00bcc6dc71e8e62d96 Mon Sep 17 00:00:00 2001 From: yong mao Date: Sat, 4 Mar 2017 15:10:03 +0800 Subject: mmc: mediatek: Fixed bug where clock frequency could be set wrong This patch can fix two issues: Issue 1: In previous code, div may be overflow when setting clock frequency as f_min. We can use DIV_ROUND_UP to fix this boundary related issue. Issue 2: In previous code, we can not set the correct clock frequency when div equals 0xff. 
Signed-off-by: Yong Mao Signed-off-by: Chaotian Jing Reviewed-by: Daniel Kurtz Signed-off-by: Ulf Hansson --- drivers/mmc/host/mtk-sd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 8e32580c12b5..b235d8da0602 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -580,7 +580,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) } } sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV, - (mode << 8) | (div % 0xff)); + (mode << 8) | div); sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) cpu_relax(); @@ -1559,7 +1559,7 @@ static int msdc_drv_probe(struct platform_device *pdev) host->src_clk_freq = clk_get_rate(host->src_clk); /* Set host parameters to mmc */ mmc->ops = &mt_msdc_ops; - mmc->f_min = host->src_clk_freq / (4 * 255); + mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255); mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; /* MMC core transfer sizes tunable parameters */ -- cgit v1.2.3 From 66822d815ae61ecb2d9dba9031517e8a8476969d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 15 Mar 2017 21:11:46 -0400 Subject: drm/radeon: reinstate oland workaround for sclk Higher sclks seem to be unstable on some boards. bug: https://bugs.freedesktop.org/show_bug.cgi?id=100222 Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/si_dpm.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 72e1588580a1..c7af9fdd20c7 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2985,9 +2985,13 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, max_sclk = 75000; } } else if (rdev->family == CHIP_OLAND) { - if ((rdev->pdev->device == 0x6604) && - (rdev->pdev->subsystem_vendor == 0x1028) && - (rdev->pdev->subsystem_device == 0x066F)) { + if ((rdev->pdev->revision == 0xC7) || + (rdev->pdev->revision == 0x80) || + (rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0x87) || + (rdev->pdev->device == 0x6604) || + (rdev->pdev->device == 0x6605)) { max_sclk = 75000; } } -- cgit v1.2.3 From e11ddff68a7c455e63c4b46154a3e75c699a7b55 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 15 Mar 2017 21:13:25 -0400 Subject: drm/amdgpu: reinstate oland workaround for sclk Higher sclks seem to be unstable on some boards. 
bug: https://bugs.freedesktop.org/show_bug.cgi?id=100222 Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 33b504bafb88..c5dec210d529 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -3465,9 +3465,13 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, max_sclk = 75000; } } else if (adev->asic_type == CHIP_OLAND) { - if ((adev->pdev->device == 0x6604) && - (adev->pdev->subsystem_vendor == 0x1028) && - (adev->pdev->subsystem_device == 0x066F)) { + if ((adev->pdev->revision == 0xC7) || + (adev->pdev->revision == 0x80) || + (adev->pdev->revision == 0x81) || + (adev->pdev->revision == 0x83) || + (adev->pdev->revision == 0x87) || + (adev->pdev->device == 0x6604) || + (adev->pdev->device == 0x6605)) { max_sclk = 75000; } } -- cgit v1.2.3 From 60a970a6c58dedb4c6b2d90b75f8d6ad7c7b34dc Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Wed, 15 Mar 2017 10:13:32 +0800 Subject: drm/amdgpu: fix the clearing wb size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The clearing wb size should be the one that it is assigned. Signed-off-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a3a105ec99e2..de0cf3315484 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -475,7 +475,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) int r; if (adev->wb.wb_obj == NULL) { - r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4, + r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->wb.wb_obj, &adev->wb.gpu_addr, (void **)&adev->wb.wb); @@ -488,7 +488,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) memset(&adev->wb.used, 0, sizeof(adev->wb.used)); /* clear wb memory */ - memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE); + memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t)); } return 0; -- cgit v1.2.3 From 8bcd37d8b21de13d068414c018c9599281294a01 Mon Sep 17 00:00:00 2001 From: "Winkler, Tomas" Date: Thu, 9 Mar 2017 16:58:21 +0200 Subject: mmc: core: mmc_blk_rw_cmd_err - remove unused variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix compilation warning: drivers/mmc/core/block.c:1563:24: warning: variable ‘mq_rq’ set but not used [-Wunused-but-set-variable] struct mmc_queue_req *mq_rq; Signed-off-by: Tomas Winkler Signed-off-by: Ulf Hansson --- drivers/mmc/core/block.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 05afefcfb611..ff3da960c473 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1560,11 +1560,8 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, struct mmc_blk_request *brq, struct request *req, bool old_req_pending) { - struct mmc_queue_req *mq_rq; bool req_pending; - mq_rq = container_of(brq, struct mmc_queue_req, brq); - /* * If this is an SD card and we're writing, we can first * mark the known good 
sectors as ok. -- cgit v1.2.3 From 2ce0c7b65505e0d915e99389cced45b478dc935d Mon Sep 17 00:00:00 2001 From: Romain Izard Date: Thu, 9 Mar 2017 16:18:20 +0100 Subject: mmc: sdhci-of-at91: Support external regulators The SDHCI controller in the SAMA5D2 chip requires a valid voltage set in the power control register, otherwise commands will fail with a timeout error. When using the regulator framework to specify the regulator used by the mmc device, the voltage is not configured, and it is not possible to use the connected device. Implement a custom 'set_power' function for this specific hardware, that configures the voltage in the register in all cases. Signed-off-by: Romain Izard Acked-by: Ludovic Desroches Cc: stable@vger.kernel.org #4.9+ Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-of-at91.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'drivers') diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 2f9ad213377a..7fd964256faa 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -85,11 +85,30 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock) sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); } +/* + * In this specific implementation of the SDHCI controller, the power register + * needs to have a valid voltage set even when the power supply is managed by + * an external regulator. + */ +static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + if (!IS_ERR(host->mmc->supply.vmmc)) { + struct mmc_host *mmc = host->mmc; + + spin_unlock_irq(&host->lock); + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); + spin_lock_irq(&host->lock); + } + sdhci_set_power_noreg(host, mode, vdd); +} + static const struct sdhci_ops sdhci_at91_sama5d2_ops = { .set_clock = sdhci_at91_set_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, + .set_power = sdhci_at91_set_power, }; static const struct sdhci_pltfm_data soc_data_sama5d2 = { -- cgit v1.2.3 From 181302dc7239add8ab1449c23ecab193f52ee6ab Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 13 Mar 2017 13:40:22 +0100 Subject: mmc: ushc: fix NULL-deref at probe Make sure to check the number of endpoints to avoid dereferencing a NULL-pointer should a malicious device lack endpoints. Fixes: 53f3a9e26ed5 ("mmc: USB SD Host Controller (USHC) driver") Cc: stable # 2.6.37 Cc: David Vrabel Signed-off-by: Johan Hovold Signed-off-by: Ulf Hansson --- drivers/mmc/host/ushc.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c index d2c386f09d69..1d843357422e 100644 --- a/drivers/mmc/host/ushc.c +++ b/drivers/mmc/host/ushc.c @@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id struct ushc_data *ushc; int ret; + if (intf->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); if (mmc == NULL) return -ENOMEM; -- cgit v1.2.3 From e4c5d3762e2d6d274bd1cc948c47063becfa2103 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 27 Feb 2017 18:44:45 +0200 Subject: nvme-loop: fix a possible use-after-free when destroying the admin queue we need to destroy the nvmet sq and let it finish gracefully before continue to cleanup the queue. 
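For clarity, the teardown order after this change is (condensed from the hunk below):

	static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
	{
		/* Drain and destroy the nvmet sq before the queue backing it goes away. */
		nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
		blk_cleanup_queue(ctrl->ctrl.admin_q);
		blk_mq_free_tag_set(&ctrl->admin_tag_set);
	}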
Reviewed-by: Christoph Hellwig Signed-off-by: Sagi Grimberg --- drivers/nvme/target/loop.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index d1f06e7768ff..74e04a0855d4 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -288,9 +288,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = { static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) { + nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); blk_cleanup_queue(ctrl->ctrl.admin_q); blk_mq_free_tag_set(&ctrl->admin_tag_set); - nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); } static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl) -- cgit v1.2.3 From d11ea004a458b982e19b188c386e25a9b66ec446 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 6 Mar 2017 18:46:20 +0200 Subject: nvmet: confirm sq percpu has scheduled and switched to atomic percpu_ref_kill is not enough to prevent subsequent percpu_ref_tryget_live from failing. Hence call perfcpu_ref_kill_confirm to make it safe. Reviewed-by: Christoph Hellwig Signed-off-by: Sagi Grimberg --- drivers/nvme/target/core.c | 11 ++++++++++- drivers/nvme/target/nvmet.h | 1 + 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 11b0a0a5f661..798653b329b2 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -425,6 +425,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, ctrl->sqs[qid] = sq; } +static void nvmet_confirm_sq(struct percpu_ref *ref) +{ + struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref); + + complete(&sq->confirm_done); +} + void nvmet_sq_destroy(struct nvmet_sq *sq) { /* @@ -433,7 +440,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq) */ if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq) nvmet_async_events_free(sq->ctrl); - percpu_ref_kill(&sq->ref); + percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq); + wait_for_completion(&sq->confirm_done); wait_for_completion(&sq->free_done); percpu_ref_exit(&sq->ref); @@ -461,6 +469,7 @@ int nvmet_sq_init(struct nvmet_sq *sq) return ret; } init_completion(&sq->free_done); + init_completion(&sq->confirm_done); return 0; } diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 1370eee0a3c0..f7ff15f17ca9 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -73,6 +73,7 @@ struct nvmet_sq { u16 qid; u16 size; struct completion free_done; + struct completion confirm_done; }; /** -- cgit v1.2.3 From b25634e2a051bef4b2524b11adddfbfa6448f6cd Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 9 Mar 2017 13:45:52 +0200 Subject: nvmet-rdma: Fix a possible uninitialized variable dereference When handling a new recv command, we grab a new rsp resource and check for the queue state being live. In case the queue is not in live state, we simply restore the rsp back to the free list. However in this flow we didn't set rsp->queue yet, so we cannot dereference it. Instead, make sure to initialize rsp->queue (and other rsp members) as soon as possible so we won't reference uninitialized variables. 
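The resulting order in nvmet_rdma_recv_done() — fully initialise the rsp before any early-exit path can touch it — is, condensed from the hunk below:

	rsp = nvmet_rdma_get_rsp(queue);
	/* Initialise every field the error path may dereference. */
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		/* rsp->queue is now valid, so restoring rsp to the free list is safe. */
		...
	}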
Reported-by: Yi Zhang Reported-by: Raju Rangoju Reviewed-by: Christoph Hellwig Tested-by: Raju Rangoju Signed-off-by: Sagi Grimberg --- drivers/nvme/target/rdma.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 9aa1da3778b3..ecc4fe862561 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, { u16 status; - cmd->queue = queue; - cmd->n_rdma = 0; - cmd->req.port = queue->port; - - ib_dma_sync_single_for_cpu(queue->dev->device, cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, DMA_FROM_DEVICE); @@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) cmd->queue = queue; rsp = nvmet_rdma_get_rsp(queue); + rsp->queue = queue; rsp->cmd = cmd; rsp->flags = 0; rsp->req.cmd = cmd->nvme_cmd; + rsp->req.port = queue->port; + rsp->n_rdma = 0; if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { unsigned long flags; -- cgit v1.2.3 From 59cf8bed44a79ec42303151dd014fdb6434254bb Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 16 Mar 2017 11:34:02 -0700 Subject: Input: iforce - validate number of endpoints before using them Make sure to check the number of endpoints to avoid dereferencing a NULL-pointer or accessing memory that lie beyond the end of the endpoint array should a malicious device lack the expected endpoints. Signed-off-by: Johan Hovold Cc: stable@vger.kernel.org Signed-off-by: Dmitry Torokhov --- drivers/input/joystick/iforce/iforce-usb.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index d96aa27dfcdc..db64adfbe1af 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c @@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf, interface = intf->cur_altsetting; + if (interface->desc.bNumEndpoints < 2) + return -ENODEV; + epirq = &interface->endpoint[0].desc; epout = &interface->endpoint[1].desc; -- cgit v1.2.3 From ac2ee9ba953afe88f7a673e1c0c839227b1d7891 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 16 Mar 2017 11:35:12 -0700 Subject: Input: cm109 - validate number of endpoints before using them Make sure to check the number of endpoints to avoid dereferencing a NULL-pointer should a malicious device lack endpoints. 
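The same defensive pattern recurs throughout this series of probe() fixes: before dereferencing interface->endpoint[n], check that bNumEndpoints covers every index the driver will use. A generic sketch (hypothetical driver, not code from any one of these patches):

	static int example_probe(struct usb_interface *intf,
				 const struct usb_device_id *id)
	{
		struct usb_host_interface *interface = intf->cur_altsetting;

		/* This driver touches endpoint[0] and endpoint[1] below. */
		if (interface->desc.bNumEndpoints < 2)
			return -ENODEV;

		/* ... interface->endpoint[0].desc etc. are now safe to read ... */
		return 0;
	}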
Fixes: c04148f915e5 ("Input: add driver for USB VoIP phones with CM109...") Signed-off-by: Johan Hovold Cc: stable@vger.kernel.org # 2.6.28 Signed-off-by: Dmitry Torokhov --- drivers/input/misc/cm109.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c index 9cc6d057c302..23c191a2a071 100644 --- a/drivers/input/misc/cm109.c +++ b/drivers/input/misc/cm109.c @@ -700,6 +700,10 @@ static int cm109_usb_probe(struct usb_interface *intf, int error = -ENOMEM; interface = intf->cur_altsetting; + + if (interface->desc.bNumEndpoints < 1) + return -ENODEV; + endpoint = &interface->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint)) -- cgit v1.2.3 From 1916d319271664241b7aa0cd2b05e32bdb310ce9 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 16 Mar 2017 11:36:13 -0700 Subject: Input: ims-pcu - validate number of endpoints before using them Make sure to check the number of endpoints to avoid dereferencing a NULL-pointer should a malicious device lack control-interface endpoints. Fixes: 628329d52474 ("Input: add IMS Passenger Control Unit driver") Signed-off-by: Johan Hovold Cc: stable@vger.kernel.org # 3.10 Signed-off-by: Dmitry Torokhov --- drivers/input/misc/ims-pcu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index 9c0ea36913b4..f4e8fbec6a94 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -1667,6 +1667,10 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc return -EINVAL; alt = pcu->ctrl_intf->cur_altsetting; + + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + pcu->ep_ctrl = &alt->endpoint[0].desc; pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl); -- cgit v1.2.3 From 5cc4a1a9f5c179795c8a1f2b0f4361829d6a070e Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 16 Mar 2017 11:37:01 -0700 Subject: Input: yealink - validate number of endpoints before using them Make sure to check the number of endpoints to avoid dereferencing a NULL-pointer should a malicious device lack endpoints. Fixes: aca951a22a1d ("[PATCH] input-driver-yealink-P1K-usb-phone") Signed-off-by: Johan Hovold Cc: stable@vger.kernel.org # 2.6.14 Signed-off-by: Dmitry Torokhov --- drivers/input/misc/yealink.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c index 79c964c075f1..6e7ff9561d92 100644 --- a/drivers/input/misc/yealink.c +++ b/drivers/input/misc/yealink.c @@ -875,6 +875,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id) int ret, pipe, i; interface = intf->cur_altsetting; + + if (interface->desc.bNumEndpoints < 1) + return -ENODEV; + endpoint = &interface->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint)) return -ENODEV; -- cgit v1.2.3 From ba340d7b83703768ce566f53f857543359aa1b98 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 16 Mar 2017 11:39:29 -0700 Subject: Input: hanwang - validate number of endpoints before using them Make sure to check the number of endpoints to avoid dereferencing a NULL-pointer should a malicious device lack endpoints. 
Fixes: bba5394ad3bd ("Input: add support for Hanwang tablets") Signed-off-by: Johan Hovold Cc: stable@vger.kernel.org # 2.6.37 Signed-off-by: Dmitry Torokhov --- drivers/input/tablet/hanwang.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c index cd852059b99e..df4bea96d7ed 100644 --- a/drivers/input/tablet/hanwang.c +++ b/drivers/input/tablet/hanwang.c @@ -340,6 +340,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id int error; int i; + if (intf->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL); input_dev = input_allocate_device(); if (!hanwang || !input_dev) { -- cgit v1.2.3 From cb1b494663e037253337623bf1ef2df727883cb7 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 16 Mar 2017 11:41:55 -0700 Subject: Input: kbtab - validate number of endpoints before using them Make sure to check the number of endpoints to avoid dereferencing a NULL-pointer should a malicious device lack endpoints. Signed-off-by: Johan Hovold Cc: stable@vger.kernel.org Signed-off-by: Dmitry Torokhov --- drivers/input/tablet/kbtab.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c index e850d7e8afbc..4d9d64908b59 100644 --- a/drivers/input/tablet/kbtab.c +++ b/drivers/input/tablet/kbtab.c @@ -122,6 +122,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i struct input_dev *input_dev; int error = -ENOMEM; + if (intf->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); input_dev = input_allocate_device(); if (!kbtab || !input_dev) -- cgit v1.2.3 From 92461f5d723037530c1f36cce93640770037812c Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 16 Mar 2017 11:43:09 -0700 Subject: Input: sur40 - validate number of endpoints before using them Make sure to check the number of endpoints to avoid dereferencing a NULL-pointer or accessing memory that lie beyond the end of the endpoint array should a malicious device lack the expected endpoints. Fixes: bdb5c57f209c ("Input: add sur40 driver for Samsung SUR40... ") Signed-off-by: Johan Hovold Cc: stable@vger.kernel.org # 3.13 Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/sur40.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index aefb6e11f88a..4c0eecae065c 100644 --- a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c @@ -527,6 +527,9 @@ static int sur40_probe(struct usb_interface *interface, if (iface_desc->desc.bInterfaceClass != 0xFF) return -ENODEV; + if (iface_desc->desc.bNumEndpoints < 5) + return -ENODEV; + /* Use endpoint #4 (0x86). */ endpoint = &iface_desc->endpoint[4].desc; if (endpoint->bEndpointAddress != TOUCH_ENDPOINT) -- cgit v1.2.3 From 15c9e10d9ad4d41d076148bbff1de7f659f68852 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 16 Mar 2017 16:40:33 -0700 Subject: drivers core: remove assert_held_device_hotplug() The last caller of assert_held_device_hotplug() is gone, so remove it again. Link: http://lkml.kernel.org/r/20170314125226.16779-3-heiko.carstens@de.ibm.com Signed-off-by: Heiko Carstens Acked-by: Dan Williams Cc: Michal Hocko Cc: "Rafael J. 
Wysocki" Cc: Vladimir Davydov Cc: Ben Hutchings Cc: Gerald Schaefer Cc: Martin Schwidefsky Cc: Sebastian Ott Cc: Greg Kroah-Hartman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/core.c | 5 ----- include/linux/device.h | 1 - 2 files changed, 6 deletions(-) (limited to 'drivers') diff --git a/drivers/base/core.c b/drivers/base/core.c index 684bda4d14a1..6bb60fb6a30b 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -639,11 +639,6 @@ int lock_device_hotplug_sysfs(void) return restart_syscall(); } -void assert_held_device_hotplug(void) -{ - lockdep_assert_held(&device_hotplug_lock); -} - #ifdef CONFIG_BLOCK static inline int device_is_not_partition(struct device *dev) { diff --git a/include/linux/device.h b/include/linux/device.h index 30c4570e928d..9ef518af5515 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -1140,7 +1140,6 @@ static inline bool device_supports_offline(struct device *dev) extern void lock_device_hotplug(void); extern void unlock_device_hotplug(void); extern int lock_device_hotplug_sysfs(void); -void assert_held_device_hotplug(void); extern int device_offline(struct device *dev); extern int device_online(struct device *dev); extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); -- cgit v1.2.3 From 4cbe4dac82e423ecc9a0ba46af24a860853259f4 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Mon, 13 Mar 2017 19:29:08 +0200 Subject: net/mlx4_core: Avoid delays during VF driver device shutdown Some Hypervisors detach VFs from VMs by instantly causing an FLR event to be generated for a VF. In the mlx4 case, this will cause that VF's comm channel to be disabled before the VM has an opportunity to invoke the VF device's "shutdown" method. For such Hypervisors, there is a race condition between the VF's shutdown method and its internal-error detection/reset thread. The internal-error detection/reset thread (which runs every 5 seconds) also detects a disabled comm channel. If the internal-error detection/reset flow wins the race, we still get delays (while that flow tries repeatedly to detect comm-channel recovery). The cited commit fixed the command timeout problem when the internal-error detection/reset flow loses the race. This commit avoids the unneeded delays when the internal-error detection/reset flow wins. Fixes: d585df1c5ccf ("net/mlx4_core: Avoid command timeouts during VF driver device shutdown") Signed-off-by: Jack Morgenstein Reported-by: Simon Xiao Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 11 +++++++++++ drivers/net/ethernet/mellanox/mlx4/main.c | 11 +++++++++++ include/linux/mlx4/device.h | 1 + 3 files changed, 23 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index e8c105164931..0e0fa7030565 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2305,6 +2305,17 @@ static int sync_toggles(struct mlx4_dev *dev) rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)); if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) { /* PCI might be offline */ + + /* If device removal has been requested, + * do not continue retrying. 
+ */ + if (dev->persist->interface_state & + MLX4_INTERFACE_STATE_NOWAIT) { + mlx4_warn(dev, + "communication channel is offline\n"); + return -EIO; + } + msleep(100); wr_toggle = swab32(readl(&priv->mfunc.comm-> slave_write)); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 21377c315083..703205475524 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1940,6 +1940,14 @@ static int mlx4_comm_check_offline(struct mlx4_dev *dev) (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); if (!offline_bit) return 0; + + /* If device removal has been requested, + * do not continue retrying. + */ + if (dev->persist->interface_state & + MLX4_INTERFACE_STATE_NOWAIT) + break; + /* There are cases as part of AER/Reset flow that PF needs * around 100 msec to load. We therefore sleep for 100 msec * to allow other tasks to make use of that CPU during this @@ -3955,6 +3963,9 @@ static void mlx4_remove_one(struct pci_dev *pdev) struct devlink *devlink = priv_to_devlink(priv); int active_vfs = 0; + if (mlx4_is_slave(dev)) + persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT; + mutex_lock(&persist->interface_state_mutex); persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; mutex_unlock(&persist->interface_state_mutex); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 7e66e4f62858..1beb1ec2fbdf 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -476,6 +476,7 @@ enum { enum { MLX4_INTERFACE_STATE_UP = 1 << 0, MLX4_INTERFACE_STATE_DELETION = 1 << 1, + MLX4_INTERFACE_STATE_NOWAIT = 1 << 2, }; #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ -- cgit v1.2.3 From bc9ab9231ec8c08352ea860480523d88a221a68f Mon Sep 17 00:00:00 2001 From: David Arcari Date: Mon, 13 Mar 2017 19:07:16 -0400 Subject: net: ethernet: aquantia: set net_device mtu when mtu is changed When the aquantia device mtu is changed the net_device structure is not updated. As a result the ip command does not properly reflect the mtu change. Commit 5513e16421cb incorrectly assumed that __dev_set_mtu() was making the assignment ndev->mtu = new_mtu; This is not true in the case where the driver has a ndo_change_mtu routine. Fixes: 5513e16421cb ("net: ethernet: aquantia: Fixes for aq_ndev_change_mtu") Cc: Pavel Belous Signed-off-by: David Arcari Tested-by: Pavel Belous Reviewed-by: Jarod Wilson Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index dad63623be6a..d05fbfdce5e5 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -98,6 +98,7 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) if (err < 0) goto err_exit; + ndev->mtu = new_mtu; if (netif_running(ndev)) { aq_ndev_close(ndev); -- cgit v1.2.3 From 687e0687f71ec00e0132a21fef802dee88c2f1ad Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 14 Mar 2017 17:55:45 +0100 Subject: USB: usbtmc: add missing endpoint sanity check USBTMC devices are required to have a bulk-in and a bulk-out endpoint, but the driver failed to verify this, something which could lead to the endpoint addresses being taken from uninitialised memory. Make sure to zero all private data as part of allocation, and add the missing endpoint sanity check. 
Note that this also addresses a more recently introduced issue, where the interrupt-in-presence flag would also be uninitialised whenever the optional interrupt-in endpoint is not present. This in turn could lead to an interrupt urb being allocated, initialised and submitted based on uninitialised values. Fixes: dbf3e7f654c0 ("Implement an ioctl to support the USMTMC-USB488 READ_STATUS_BYTE operation.") Fixes: 5b775f672cc9 ("USB: add USB test and measurement class driver") Cc: stable # 2.6.28 Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/usbtmc.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index f03692ec5520..5e3446db4513 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -1381,7 +1381,7 @@ static int usbtmc_probe(struct usb_interface *intf, dev_dbg(&intf->dev, "%s called\n", __func__); - data = kmalloc(sizeof(*data), GFP_KERNEL); + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; @@ -1444,6 +1444,13 @@ static int usbtmc_probe(struct usb_interface *intf, break; } } + + if (!data->bulk_out || !data->bulk_in) { + dev_err(&intf->dev, "bulk endpoints not found\n"); + retcode = -ENODEV; + goto err_put; + } + /* Find int endpoint */ for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) { endpoint = &iface_desc->endpoint[n].desc; @@ -1512,6 +1519,7 @@ error_register: sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp); sysfs_remove_group(&intf->dev.kobj, &data_attr_grp); usbtmc_free_int(data); +err_put: kref_put(&data->kref, usbtmc_delete); return retcode; } -- cgit v1.2.3 From 2e47c53503eb9faff42b3cfa144a833344dd1f89 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 14 Mar 2017 17:55:46 +0100 Subject: USB: usbtmc: fix probe error path Make sure to initialise the return value to avoid having allocation failures going unnoticed when allocating interrupt-endpoint resources. This prevents use-after-free or worse when the device is later unbound. 
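The underlying bug class is the common goto-cleanup pattern with a status variable that every failure path must assign. A stripped-down illustration (hypothetical code, not the driver's) of why a missing assignment can return stale data:

    #include <linux/slab.h>

    /* Illustration only: 'retcode' is deliberately not pre-initialised,
     * so any failure path that jumps to a cleanup label without setting
     * it (as the interrupt-endpoint allocations did) returns an
     * indeterminate value and a failed probe may look successful.
     */
    static int example_probe(void)
    {
        void *a, *b;
        int retcode;

        a = kmalloc(64, GFP_KERNEL);
        if (!a) {
            retcode = -ENOMEM;    /* the kind of assignment the patch adds */
            goto err_out;
        }

        b = kmalloc(64, GFP_KERNEL);
        if (!b) {
            retcode = -ENOMEM;
            goto err_free_a;
        }

        kfree(b);
        kfree(a);
        return 0;

    err_free_a:
        kfree(a);
    err_out:
        return retcode;
    }
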
Fixes: dbf3e7f654c0 ("Implement an ioctl to support the USMTMC-USB488 READ_STATUS_BYTE operation.") Cc: stable # 4.6 Cc: Dave Penkler Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/usbtmc.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 5e3446db4513..8fb309a0ff6b 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -1476,8 +1476,10 @@ static int usbtmc_probe(struct usb_interface *intf, if (data->iin_ep_present) { /* allocate int urb */ data->iin_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!data->iin_urb) + if (!data->iin_urb) { + retcode = -ENOMEM; goto error_register; + } /* Protect interrupt in endpoint data until iin_urb is freed */ kref_get(&data->kref); @@ -1485,8 +1487,10 @@ static int usbtmc_probe(struct usb_interface *intf, /* allocate buffer for interrupt in */ data->iin_buffer = kmalloc(data->iin_wMaxPacketSize, GFP_KERNEL); - if (!data->iin_buffer) + if (!data->iin_buffer) { + retcode = -ENOMEM; goto error_register; + } /* fill interrupt urb */ usb_fill_int_urb(data->iin_urb, data->usb_dev, -- cgit v1.2.3 From cdd7928df0d2efaa3270d711963773a08a4cc8ab Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Tue, 14 Mar 2017 12:09:56 +0100 Subject: ACM gadget: fix endianness in notifications MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The gadget code exports the bitfield for serial status changes over the wire in its internal endianness. The fix is to convert to little endian before sending it over the wire. Signed-off-by: Oliver Neukum Tested-by: 家瑋 CC: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/f_acm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c index a30766ca4226..5e3828d9dac7 100644 --- a/drivers/usb/gadget/function/f_acm.c +++ b/drivers/usb/gadget/function/f_acm.c @@ -535,13 +535,15 @@ static int acm_notify_serial_state(struct f_acm *acm) { struct usb_composite_dev *cdev = acm->port.func.config->cdev; int status; + __le16 serial_state; spin_lock(&acm->lock); if (acm->notify_req) { dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n", acm->port_num, acm->serial_state); + serial_state = cpu_to_le16(acm->serial_state); status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, - 0, &acm->serial_state, sizeof(acm->serial_state)); + 0, &serial_state, sizeof(acm->serial_state)); } else { acm->pending = true; status = 0; -- cgit v1.2.3 From 9501df3cd9204f5859f649182431616a31ee88a1 Mon Sep 17 00:00:00 2001 From: Nathan Fontenot Date: Wed, 15 Mar 2017 23:38:07 -0400 Subject: ibmvnic: Free tx/rx scrq pointer array when releasing sub-crqs The pointer array for the tx/rx sub crqs should be free'ed when releasing the tx/rx sub crqs. Signed-off-by: Nathan Fontenot Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5f11b4dc95d2..b23d6545f835 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1257,6 +1257,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) release_sub_crq_queue(adapter, adapter->tx_scrq[i]); } + kfree(adapter->tx_scrq); adapter->tx_scrq = NULL; } @@ -1269,6 +1270,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) release_sub_crq_queue(adapter, adapter->rx_scrq[i]); } + kfree(adapter->rx_scrq); adapter->rx_scrq = NULL; } } -- cgit v1.2.3 From e14b4db7a567ff507453ecd9c64da51bbc2b6d23 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Thu, 16 Mar 2017 12:21:32 -0700 Subject: netvsc: fix race during initialization When device is being setup on boot, there is a small race where network device callback is registered, but the netvsc_device pointer is not set yet. This can cause a NULL ptr dereference if packet arrives during this window. Fixes: 46b4f7f5d1f7 ("netvsc: eliminate per-device outstanding send counter") Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 4c1d8cca247b..8dd0b8770328 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1231,8 +1231,11 @@ void netvsc_channel_cb(void *context) return; net_device = net_device_to_netvsc_device(ndev); - if (unlikely(net_device->destroy) && - netvsc_channel_idle(net_device, q_idx)) + if (unlikely(!net_device)) + return; + + if (unlikely(net_device->destroy && + netvsc_channel_idle(net_device, q_idx))) return; /* commit_rd_index() -> hv_signal_on_read() needs this. */ -- cgit v1.2.3 From 7b2db29fbb4e766fcd02207eb2e2087170bd6ebc Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 8 Mar 2017 10:19:36 -0800 Subject: usb: hub: Fix crash after failure to read BOS descriptor If usb_get_bos_descriptor() returns an error, usb->bos will be NULL. Nevertheless, it is dereferenced unconditionally in hub_set_initial_usb2_lpm_policy() if usb2_hw_lpm_capable is set. This results in a crash. usb 5-1: unable to get BOS descriptor ... Unable to handle kernel NULL pointer dereference at virtual address 00000008 pgd = ffffffc00165f000 [00000008] *pgd=000000000174f003, *pud=000000000174f003, *pmd=0000000001750003, *pte=00e8000001751713 Internal error: Oops: 96000005 [#1] PREEMPT SMP Modules linked in: uinput uvcvideo videobuf2_vmalloc cmac [ ... ] CPU: 5 PID: 3353 Comm: kworker/5:3 Tainted: G B 4.4.52 #480 Hardware name: Google Kevin (DT) Workqueue: events driver_set_config_work task: ffffffc0c3690000 ti: ffffffc0ae9a8000 task.ti: ffffffc0ae9a8000 PC is at hub_port_init+0xc3c/0xd10 LR is at hub_port_init+0xc3c/0xd10 ... 
Call trace: [] hub_port_init+0xc3c/0xd10 [] usb_reset_and_verify_device+0x15c/0x82c [] usb_reset_device+0xe4/0x298 [] rtl8152_probe+0x84/0x9b0 [r8152] [] usb_probe_interface+0x244/0x2f8 [] driver_probe_device+0x180/0x3b4 [] __device_attach_driver+0xb4/0xe0 [] bus_for_each_drv+0xb4/0xe4 [] __device_attach+0xd0/0x158 [] device_initial_probe+0x24/0x30 [] bus_probe_device+0x50/0xe4 [] device_add+0x414/0x738 [] usb_set_configuration+0x89c/0x914 [] driver_set_config_work+0xc0/0xf0 [] process_one_work+0x390/0x6b8 [] worker_thread+0x480/0x610 [] kthread+0x164/0x178 [] ret_from_fork+0x10/0x40 Since we don't know anything about LPM capabilities without BOS descriptor, don't attempt to enable LPM if it is not available. Fixes: 890dae886721 ("xhci: Enable LPM support only for hardwired ...") Cc: stable Cc: Mathias Nyman Signed-off-by: Guenter Roeck Acked-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/hub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index f0dd08198d74..5286bf67869a 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4275,7 +4275,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev) struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN; - if (!udev->usb2_hw_lpm_capable) + if (!udev->usb2_hw_lpm_capable || !udev->bos) return; if (hub) -- cgit v1.2.3 From b767ad726c2aa6219318bf0da83fbe690e653d9a Mon Sep 17 00:00:00 2001 From: Aleksey Makarov Date: Wed, 1 Mar 2017 18:23:02 +0300 Subject: Revert "tty: serial: pl011: add ttyAMA for matching pl011 console" The original patch makes the condition always true, so it is wrong. It masks (but not fixes) the bug described in the commit message but introduces a regression (no console is selected by SPCR) in regular (no 'console=ttyAMA') case. s/||/&&/ would not fix the problem as the root cause was identified incorrectly. This reverts commit aea9a80ba98a0c9b4de88850260e9fbdcc98360b. Signed-off-by: Aleksey Makarov Signed-off-by: Greg Kroah-Hartman Acked-by: Sudeep Holla Tested-by: Jayachandran C Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/amba-pl011.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 8789ea423ccf..56f92d7348bf 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2373,7 +2373,7 @@ static int __init pl011_console_match(struct console *co, char *name, int idx, if (strcmp(name, "qdf2400_e44") == 0) { pr_info_once("UART: Working around QDF2400 SoC erratum 44"); qdf2400_e44_present = true; - } else if (strcmp(name, "pl011") != 0 || strcmp(name, "ttyAMA") != 0) { + } else if (strcmp(name, "pl011") != 0) { return -ENODEV; } -- cgit v1.2.3 From 5362544bebe85071188dd9e479b5a5040841c895 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Sat, 4 Mar 2017 14:55:19 +0100 Subject: tty: don't panic on OOM in tty_set_ldisc() If tty_ldisc_open() fails in tty_set_ldisc(), it tries to go back to the old discipline or N_TTY. But that can fail as well, in such case it panics. This is not a graceful way to handle OOM. Leave ldisc==NULL if all attempts fail instead. Also use existing tty_ldisc_reinit() helper function instead of tty_ldisc_restore(). Also don't WARN/BUG in tty_ldisc_reinit() if N_TTY fails, which would have the same net effect of bringing kernel down on OOM. 
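In simplified form the new fallback ladder looks like this (a sketch that mirrors the tty_set_ldisc() hunk below, using that function's variable names; not a standalone snippet):

    /* Sketch: degrade gracefully instead of panicking. */
    retval = tty_ldisc_reinit(tty, disc);            /* requested discipline */
    if (retval < 0) {
        if (tty_ldisc_reinit(tty, old_disc) < 0) {   /* fall back to previous */
            pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n");
            if (tty_ldisc_reinit(tty, N_TTY) < 0)    /* last resort */
                /* tty->ldisc is left NULL; report and carry on */
                pr_err("tty: reinitializing N_TTY failed\n");
        }
    }
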
Instead print a single line message about what has happened. Signed-off-by: Dmitry Vyukov Cc: syzkaller@googlegroups.com Cc: linux-kernel@vger.kernel.org Cc: Greg Kroah-Hartman Cc: Jiri Slaby Cc: Peter Hurley Cc: One Thousand Gnomes Signed-off-by: Greg Kroah-Hartman --- drivers/tty/tty_ldisc.c | 85 ++++++++++--------------------------------------- 1 file changed, 16 insertions(+), 69 deletions(-) (limited to 'drivers') diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 68947f6de5ad..c3956ca022e4 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -488,41 +488,6 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld) tty_ldisc_debug(tty, "%p: closed\n", ld); } -/** - * tty_ldisc_restore - helper for tty ldisc change - * @tty: tty to recover - * @old: previous ldisc - * - * Restore the previous line discipline or N_TTY when a line discipline - * change fails due to an open error - */ - -static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) -{ - struct tty_ldisc *new_ldisc; - int r; - - /* There is an outstanding reference here so this is safe */ - old = tty_ldisc_get(tty, old->ops->num); - WARN_ON(IS_ERR(old)); - tty->ldisc = old; - tty_set_termios_ldisc(tty, old->ops->num); - if (tty_ldisc_open(tty, old) < 0) { - tty_ldisc_put(old); - /* This driver is always present */ - new_ldisc = tty_ldisc_get(tty, N_TTY); - if (IS_ERR(new_ldisc)) - panic("n_tty: get"); - tty->ldisc = new_ldisc; - tty_set_termios_ldisc(tty, N_TTY); - r = tty_ldisc_open(tty, new_ldisc); - if (r < 0) - panic("Couldn't open N_TTY ldisc for " - "%s --- error %d.", - tty_name(tty), r); - } -} - /** * tty_set_ldisc - set line discipline * @tty: the terminal to set @@ -536,12 +501,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) int tty_set_ldisc(struct tty_struct *tty, int disc) { - int retval; - struct tty_ldisc *old_ldisc, *new_ldisc; - - new_ldisc = tty_ldisc_get(tty, disc); - if (IS_ERR(new_ldisc)) - return PTR_ERR(new_ldisc); + int retval, old_disc; tty_lock(tty); retval = tty_ldisc_lock(tty, 5 * HZ); @@ -554,7 +514,8 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) } /* Check the no-op case */ - if (tty->ldisc->ops->num == disc) + old_disc = tty->ldisc->ops->num; + if (old_disc == disc) goto out; if (test_bit(TTY_HUPPED, &tty->flags)) { @@ -563,34 +524,25 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) goto out; } - old_ldisc = tty->ldisc; - - /* Shutdown the old discipline. */ - tty_ldisc_close(tty, old_ldisc); - - /* Now set up the new line discipline. */ - tty->ldisc = new_ldisc; - tty_set_termios_ldisc(tty, disc); - - retval = tty_ldisc_open(tty, new_ldisc); + retval = tty_ldisc_reinit(tty, disc); if (retval < 0) { /* Back to the old one or N_TTY if we can't */ - tty_ldisc_put(new_ldisc); - tty_ldisc_restore(tty, old_ldisc); + if (tty_ldisc_reinit(tty, old_disc) < 0) { + pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n"); + if (tty_ldisc_reinit(tty, N_TTY) < 0) { + /* At this point we have tty->ldisc == NULL. 
*/ + pr_err("tty: reinitializing N_TTY failed\n"); + } + } } - if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) { + if (tty->ldisc && tty->ldisc->ops->num != old_disc && + tty->ops->set_ldisc) { down_read(&tty->termios_rwsem); tty->ops->set_ldisc(tty); up_read(&tty->termios_rwsem); } - /* At this point we hold a reference to the new ldisc and a - reference to the old ldisc, or we hold two references to - the old ldisc (if it was restored as part of error cleanup - above). In either case, releasing a single reference from - the old ldisc is correct. */ - new_ldisc = old_ldisc; out: tty_ldisc_unlock(tty); @@ -598,7 +550,6 @@ out: already running */ tty_buffer_restart_work(tty->port); err: - tty_ldisc_put(new_ldisc); /* drop the extra reference */ tty_unlock(tty); return retval; } @@ -659,10 +610,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) int retval; ld = tty_ldisc_get(tty, disc); - if (IS_ERR(ld)) { - BUG_ON(disc == N_TTY); + if (IS_ERR(ld)) return PTR_ERR(ld); - } if (tty->ldisc) { tty_ldisc_close(tty, tty->ldisc); @@ -674,10 +623,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) tty_set_termios_ldisc(tty, disc); retval = tty_ldisc_open(tty, tty->ldisc); if (retval) { - if (!WARN_ON(disc == N_TTY)) { - tty_ldisc_put(tty->ldisc); - tty->ldisc = NULL; - } + tty_ldisc_put(tty->ldisc); + tty->ldisc = NULL; } return retval; } -- cgit v1.2.3 From a4a3e061149f09c075f108b6f1cf04d9739a6bc2 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Sat, 4 Mar 2017 13:46:12 +0100 Subject: tty: fix data race in tty_ldisc_ref_wait() tty_ldisc_ref_wait() checks tty->ldisc under tty->ldisc_sem. But if ldisc==NULL it releases them sem and reloads tty->ldisc without holding the sem. This is wrong and can lead to returning non-NULL ldisc without protection. Don't reload tty->ldisc second time. Signed-off-by: Dmitry Vyukov Cc: syzkaller@googlegroups.com Cc: linux-kernel@vger.kernel.org Cc: Greg Kroah-Hartman Cc: Jiri Slaby Cc: Peter Hurley Cc: One Thousand Gnomes Signed-off-by: Greg Kroah-Hartman --- drivers/tty/tty_ldisc.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index c3956ca022e4..b0500a0a87b8 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -271,10 +271,13 @@ const struct file_operations tty_ldiscs_proc_fops = { struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) { + struct tty_ldisc *ld; + ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT); - if (!tty->ldisc) + ld = tty->ldisc; + if (!ld) ldsem_up_read(&tty->ldisc_sem); - return tty->ldisc; + return ld; } EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); -- cgit v1.2.3 From 2cd29f2387be70de9feb4c9f8dbc7c0bd55748ce Mon Sep 17 00:00:00 2001 From: Robert Middleton Date: Wed, 15 Mar 2017 16:56:47 -0400 Subject: gpio:mcp23s08 Fixed missing interrupts When an interrupt occurs on an MCP23S08 chip, the INTF register will only contain one bit as causing the interrupt. If more than two pins change at the same time on the chip, this causes one of the pins to not be reported. This patch fixes the logic for checking if a pin has changed, so that multiple pins will always cause more than one change. 
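The core of the fix is to trust a fresh GPIO read compared against the cached value rather than INTF alone. A hypothetical helper sketching that idea (simplified from the per-bit logic in the hunk below; the INTCON/DEFVAL level-compare case is ignored here):

    #include <linux/bitops.h>
    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    /* Sketch: every bit that differs from the cached snapshot is a
     * candidate interrupt source, no matter how many bits INTF set.
     */
    static void report_changed_pins(struct irq_domain *domain,
                                    unsigned long cached, unsigned long now,
                                    unsigned long irq_rise,
                                    unsigned long irq_fall,
                                    unsigned int ngpio)
    {
        unsigned long changed = cached ^ now;
        unsigned int i;

        for_each_set_bit(i, &changed, ngpio) {
            bool high = now & BIT(i);    /* changed and now high => rising edge */

            if ((high && (irq_rise & BIT(i))) ||
                (!high && (irq_fall & BIT(i))))
                handle_nested_irq(irq_find_mapping(domain, i));
        }
    }
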
Cc: stable@vger.kernel.org Signed-off-by: Robert Middleton Tested-by: Phil Reid Signed-off-by: Linus Walleij --- drivers/gpio/gpio-mcp23s08.c | 65 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 60 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index bdb692345428..2a57d024481d 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c @@ -270,8 +270,10 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value) static irqreturn_t mcp23s08_irq(int irq, void *data) { struct mcp23s08 *mcp = data; - int intcap, intf, i; + int intcap, intf, i, gpio, gpio_orig, intcap_mask; unsigned int child_irq; + bool intf_set, intcap_changed, gpio_bit_changed, + defval_changed, gpio_set; mutex_lock(&mcp->lock); if (mcp_read(mcp, MCP_INTF, &intf) < 0) { @@ -287,14 +289,67 @@ static irqreturn_t mcp23s08_irq(int irq, void *data) } mcp->cache[MCP_INTCAP] = intcap; + + /* This clears the interrupt(configurable on S18) */ + if (mcp_read(mcp, MCP_GPIO, &gpio) < 0) { + mutex_unlock(&mcp->lock); + return IRQ_HANDLED; + } + gpio_orig = mcp->cache[MCP_GPIO]; + mcp->cache[MCP_GPIO] = gpio; mutex_unlock(&mcp->lock); + if (mcp->cache[MCP_INTF] == 0) { + /* There is no interrupt pending */ + return IRQ_HANDLED; + } + + dev_dbg(mcp->chip.parent, + "intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n", + intcap, intf, gpio_orig, gpio); for (i = 0; i < mcp->chip.ngpio; i++) { - if ((BIT(i) & mcp->cache[MCP_INTF]) && - ((BIT(i) & intcap & mcp->irq_rise) || - (mcp->irq_fall & ~intcap & BIT(i)) || - (BIT(i) & mcp->cache[MCP_INTCON]))) { + /* We must check all of the inputs on the chip, + * otherwise we may not notice a change on >=2 pins. + * + * On at least the mcp23s17, INTCAP is only updated + * one byte at a time(INTCAPA and INTCAPB are + * not written to at the same time - only on a per-bank + * basis). + * + * INTF only contains the single bit that caused the + * interrupt per-bank. On the mcp23s17, there is + * INTFA and INTFB. If two pins are changed on the A + * side at the same time, INTF will only have one bit + * set. If one pin on the A side and one pin on the B + * side are changed at the same time, INTF will have + * two bits set. Thus, INTF can't be the only check + * to see if the input has changed. 
+ */ + + intf_set = BIT(i) & mcp->cache[MCP_INTF]; + if (i < 8 && intf_set) + intcap_mask = 0x00FF; + else if (i >= 8 && intf_set) + intcap_mask = 0xFF00; + else + intcap_mask = 0x00; + + intcap_changed = (intcap_mask & + (BIT(i) & mcp->cache[MCP_INTCAP])) != + (intcap_mask & (BIT(i) & gpio_orig)); + gpio_set = BIT(i) & mcp->cache[MCP_GPIO]; + gpio_bit_changed = (BIT(i) & gpio_orig) != + (BIT(i) & mcp->cache[MCP_GPIO]); + defval_changed = (BIT(i) & mcp->cache[MCP_INTCON]) && + ((BIT(i) & mcp->cache[MCP_GPIO]) != + (BIT(i) & mcp->cache[MCP_DEFVAL])); + + if (((gpio_bit_changed || intcap_changed) && + (BIT(i) & mcp->irq_rise) && gpio_set) || + ((gpio_bit_changed || intcap_changed) && + (BIT(i) & mcp->irq_fall) && !gpio_set) || + defval_changed) { child_irq = irq_find_mapping(mcp->chip.irqdomain, i); handle_nested_irq(child_irq); } -- cgit v1.2.3 From 4938ca90166d6d3061793789e2eef42cd934fa97 Mon Sep 17 00:00:00 2001 From: Zhao Yan Date: Thu, 9 Mar 2017 10:09:44 +0800 Subject: drm/i915/gvt: handle force-nonpriv registers, cmd parser part this patch adds force non-priv registers check in LRI cmds handler v4: transform is_force_nonpriv_mmio() from macro to inline fuction to eliminate checkpatch warning v3: per zhenyu's comment, fix some style warnings v2: per zhenyu's comment, refine the code to remove cascaded ifs Signed-off-by: Zhao Yan Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 23 +++++++++++++++++++++++ drivers/gpu/drm/i915/gvt/handlers.c | 17 +++++++++++++++++ drivers/gpu/drm/i915/gvt/mmio.h | 3 +++ 3 files changed, 43 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 7ae6e2b241c8..919c83abaeb1 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset) return ret; } +static inline bool is_force_nonpriv_mmio(unsigned int offset) +{ + return (offset >= 0x24d0 && offset < 0x2500); +} + +static int force_nonpriv_reg_handler(struct parser_exec_state *s, + unsigned int offset, unsigned int index) +{ + struct intel_gvt *gvt = s->vgpu->gvt; + unsigned int data = cmd_val(s, index + 1); + + if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) { + gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n", + offset, data); + return -EINVAL; + } + return 0; +} + static int cmd_reg_handler(struct parser_exec_state *s, unsigned int offset, unsigned int index, char *cmd) { @@ -841,6 +860,10 @@ static int cmd_reg_handler(struct parser_exec_state *s, return 0; } + if (is_force_nonpriv_mmio(offset) && + force_nonpriv_reg_handler(s, offset, index)) + return -EINVAL; + if (offset == i915_mmio_reg_offset(DERRMR) || offset == i915_mmio_reg_offset(FORCEWAKE_MT)) { /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */ diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 8e43395c748a..de975f40aebf 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2988,3 +2988,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, write_vreg(vgpu, offset, p_data, bytes); return 0; } + +/** + * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be + * force-nopriv register + * + * @gvt: a GVT device + * @offset: register offset + * + * Returns: + * True if the register is in force-nonpriv whitelist; + * False if outside; + */ +bool intel_gvt_in_force_nonpriv_whitelist(struct 
intel_gvt *gvt, + unsigned int offset) +{ + return in_whitelist(offset); +} diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 3bc620f56f35..a3a027025cd0 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); + +bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, + unsigned int offset); #endif -- cgit v1.2.3 From 695fbc08d80f93ecca18a1abd8f52c2ab77fdc8d Mon Sep 17 00:00:00 2001 From: Tina Zhang Date: Fri, 10 Mar 2017 04:26:53 -0500 Subject: drm/i915/gvt: replace the gvt_err with gvt_vgpu_err gvt_err should be used only for the very few critical error message during host i915 drvier initialization. This patch 1. removes the redundant gvt_err; 2. creates a new gvt_vgpu_err to show errors caused by vgpu; 3. replaces the most gvt_err with gvt_vgpu_err; 4. leaves very few gvt_err for dumping gvt error during host gvt initialization. v2. change name to gvt_vgpu_err and add vgpu id to the message. (Kevin) add gpu id to gvt_vgpu_err. (Zhi) v3. remove gpu id from gvt_vgpu_err caller. (Zhi) v4. add vgpu check to the gvt_vgpu_err macro. (Zhiyuan) v5. add comments for v3 and v4. v6. split the big patch into two, with this patch only for checking gvt_vgpu_err. (Zhenyu) v7. rebase to staging branch v8. rebase to fix branch Signed-off-by: Tina Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/aperture_gm.c | 8 ++-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 84 +++++++++++++++++++--------------- drivers/gpu/drm/i915/gvt/debug.h | 8 ++++ drivers/gpu/drm/i915/gvt/edid.c | 13 +++--- drivers/gpu/drm/i915/gvt/execlist.c | 29 ++++++------ drivers/gpu/drm/i915/gvt/gtt.c | 74 +++++++++++++++--------------- drivers/gpu/drm/i915/gvt/handlers.c | 28 ++++++------ drivers/gpu/drm/i915/gvt/kvmgt.c | 33 +++++++------ drivers/gpu/drm/i915/gvt/mmio.c | 38 +++++++-------- drivers/gpu/drm/i915/gvt/opregion.c | 10 ++-- drivers/gpu/drm/i915/gvt/render.c | 2 +- drivers/gpu/drm/i915/gvt/scheduler.c | 11 +++-- 12 files changed, 180 insertions(+), 158 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 3b6caaca9751..325618d969fe 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu, const char *item; if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) { - gvt_err("Invalid vGPU creation params\n"); + gvt_vgpu_err("Invalid vGPU creation params\n"); return -EINVAL; } @@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu, return 0; no_enough_resource: - gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item); - gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n", - vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail), + gvt_vgpu_err("fail to allocate resource %s\n", item); + gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n", + BYTES_TO_MB(request), BYTES_TO_MB(avail), BYTES_TO_MB(max), BYTES_TO_MB(taken)); return -ENOSPC; } diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 919c83abaeb1..2ca0506d8d8c 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ 
-843,20 +843,19 @@ static int cmd_reg_handler(struct parser_exec_state *s, struct intel_gvt *gvt = vgpu->gvt; if (offset + 4 > gvt->device_info.mmio_size) { - gvt_err("%s access to (%x) outside of MMIO range\n", + gvt_vgpu_err("%s access to (%x) outside of MMIO range\n", cmd, offset); return -EINVAL; } if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) { - gvt_err("vgpu%d: %s access to non-render register (%x)\n", - s->vgpu->id, cmd, offset); + gvt_vgpu_err("%s access to non-render register (%x)\n", + cmd, offset); return 0; } if (is_shadowed_mmio(offset)) { - gvt_err("vgpu%d: found access of shadowed MMIO %x\n", - s->vgpu->id, offset); + gvt_vgpu_err("found access of shadowed MMIO %x\n", offset); return 0; } @@ -1152,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, struct mi_display_flip_command_info *info) { struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; + struct intel_vgpu *vgpu = s->vgpu; u32 dword0 = cmd_val(s, 0); u32 dword1 = cmd_val(s, 1); u32 dword2 = cmd_val(s, 2); @@ -1190,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, break; default: - gvt_err("unknown plane code %d\n", plane); + gvt_vgpu_err("unknown plane code %d\n", plane); return -EINVAL; } @@ -1297,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip( static int cmd_handler_mi_display_flip(struct parser_exec_state *s) { struct mi_display_flip_command_info info; + struct intel_vgpu *vgpu = s->vgpu; int ret; int i; int len = cmd_length(s); ret = decode_mi_display_flip(s, &info); if (ret) { - gvt_err("fail to decode MI display flip command\n"); + gvt_vgpu_err("fail to decode MI display flip command\n"); return ret; } ret = check_mi_display_flip(s, &info); if (ret) { - gvt_err("invalid MI display flip command\n"); + gvt_vgpu_err("invalid MI display flip command\n"); return ret; } ret = update_plane_mmio_from_mi_display_flip(s, &info); if (ret) { - gvt_err("fail to update plane mmio\n"); + gvt_vgpu_err("fail to update plane mmio\n"); return ret; } @@ -1373,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s, int ret; if (op_size > max_surface_size) { - gvt_err("command address audit fail name %s\n", s->info->name); + gvt_vgpu_err("command address audit fail name %s\n", + s->info->name); return -EINVAL; } @@ -1390,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s, } return 0; err: - gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n", + gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n", s->info->name, guest_gma, op_size); pr_err("cmd dump: "); @@ -1435,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s) static inline int unexpected_cmd(struct parser_exec_state *s) { - gvt_err("vgpu%d: Unexpected %s in command buffer!\n", - s->vgpu->id, s->info->name); + struct intel_vgpu *vgpu = s->vgpu; + + gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name); + return -EINVAL; } @@ -1539,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm, while (gma != end_gma) { gpa = intel_vgpu_gma_to_gpa(mm, gma); if (gpa == INTEL_GVT_INVALID_ADDR) { - gvt_err("invalid gma address: %lx\n", gma); + gvt_vgpu_err("invalid gma address: %lx\n", gma); return -EFAULT; } @@ -1580,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s) uint32_t bb_size = 0; uint32_t cmd_len = 0; bool met_bb_end = false; + struct intel_vgpu *vgpu = s->vgpu; u32 cmd; /* get the start gm address of the batch buffer 
*/ @@ -1588,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s) info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); if (info == NULL) { - gvt_err("unknown cmd 0x%x, opcode=0x%x\n", + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", cmd, get_opcode(cmd, s->ring_id)); return -EINVAL; } @@ -1597,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s) gma, gma + 4, &cmd); info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); if (info == NULL) { - gvt_err("unknown cmd 0x%x, opcode=0x%x\n", + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", cmd, get_opcode(cmd, s->ring_id)); return -EINVAL; } @@ -1622,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s) static int perform_bb_shadow(struct parser_exec_state *s) { struct intel_shadow_bb_entry *entry_obj; + struct intel_vgpu *vgpu = s->vgpu; unsigned long gma = 0; uint32_t bb_size; void *dst = NULL; @@ -1656,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false); if (ret) { - gvt_err("failed to set shadow batch to CPU\n"); + gvt_vgpu_err("failed to set shadow batch to CPU\n"); goto unmap_src; } @@ -1668,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) gma, gma + bb_size, dst); if (ret) { - gvt_err("fail to copy guest ring buffer\n"); + gvt_vgpu_err("fail to copy guest ring buffer\n"); goto unmap_src; } @@ -1699,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) { bool second_level; int ret = 0; + struct intel_vgpu *vgpu = s->vgpu; if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { - gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); + gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); return -EINVAL; } second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { - gvt_err("Jumping to 2nd level BB from RB is not allowed\n"); + gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n"); return -EINVAL; } @@ -1725,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) if (batch_buffer_needs_scan(s)) { ret = perform_bb_shadow(s); if (ret < 0) - gvt_err("invalid shadow batch buffer\n"); + gvt_vgpu_err("invalid shadow batch buffer\n"); } else { /* emulate a batch buffer end to do return right */ ret = cmd_handler_mi_batch_buffer_end(s); @@ -2452,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) int ret = 0; cycles_t t0, t1, t2; struct parser_exec_state s_before_advance_custom; + struct intel_vgpu *vgpu = s->vgpu; t0 = get_cycles(); @@ -2459,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); if (info == NULL) { - gvt_err("unknown cmd 0x%x, opcode=0x%x\n", + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", cmd, get_opcode(cmd, s->ring_id)); return -EINVAL; } @@ -2475,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) if (info->handler) { ret = info->handler(s); if (ret < 0) { - gvt_err("%s handler error\n", info->name); + gvt_vgpu_err("%s handler error\n", info->name); return ret; } } @@ -2486,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) if (!(info->flag & F_IP_ADVANCE_CUSTOM)) { ret = cmd_advance_default(s); if (ret) { - gvt_err("%s IP advance error\n", info->name); + gvt_vgpu_err("%s IP advance error\n", info->name); return ret; } } @@ -2509,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s, unsigned long 
gma_head, gma_tail, gma_bottom; int ret = 0; + struct intel_vgpu *vgpu = s->vgpu; gma_head = rb_start + rb_head; gma_tail = rb_start + rb_tail; @@ -2520,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s, if (s->buf_type == RING_BUFFER_INSTRUCTION) { if (!(s->ip_gma >= rb_start) || !(s->ip_gma < gma_bottom)) { - gvt_err("ip_gma %lx out of ring scope." + gvt_vgpu_err("ip_gma %lx out of ring scope." "(base:0x%lx, bottom: 0x%lx)\n", s->ip_gma, rb_start, gma_bottom); @@ -2528,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s, return -EINVAL; } if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { - gvt_err("ip_gma %lx out of range." + gvt_vgpu_err("ip_gma %lx out of range." "base 0x%lx head 0x%lx tail 0x%lx\n", s->ip_gma, rb_start, rb_head, rb_tail); @@ -2538,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s, } ret = cmd_parser_exec(s); if (ret) { - gvt_err("cmd parser error\n"); + gvt_vgpu_err("cmd parser error\n"); parser_exec_state_dump(s); break; } @@ -2662,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) gma_head, gma_top, workload->shadow_ring_buffer_va); if (ret) { - gvt_err("fail to copy guest ring buffer\n"); + gvt_vgpu_err("fail to copy guest ring buffer\n"); return ret; } copy_len = gma_top - gma_head; @@ -2674,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) gma_head, gma_tail, workload->shadow_ring_buffer_va + copy_len); if (ret) { - gvt_err("fail to copy guest ring buffer\n"); + gvt_vgpu_err("fail to copy guest ring buffer\n"); return ret; } ring->tail += workload->rb_len; @@ -2685,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) { int ret; + struct intel_vgpu *vgpu = workload->vgpu; ret = shadow_workload_ring_buffer(workload); if (ret) { - gvt_err("fail to shadow workload ring_buffer\n"); + gvt_vgpu_err("fail to shadow workload ring_buffer\n"); return ret; } ret = scan_workload(workload); if (ret) { - gvt_err("scan workload error\n"); + gvt_vgpu_err("scan workload error\n"); return ret; } return 0; @@ -2704,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) { int ctx_size = wa_ctx->indirect_ctx.size; unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; + struct intel_vgpu *vgpu = wa_ctx->workload->vgpu; struct drm_i915_gem_object *obj; int ret = 0; void *map; @@ -2717,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) /* get the va of the shadow batch buffer */ map = i915_gem_object_pin_map(obj, I915_MAP_WB); if (IS_ERR(map)) { - gvt_err("failed to vmap shadow indirect ctx\n"); + gvt_vgpu_err("failed to vmap shadow indirect ctx\n"); ret = PTR_ERR(map); goto put_obj; } ret = i915_gem_object_set_to_cpu_domain(obj, false); if (ret) { - gvt_err("failed to set shadow indirect ctx to CPU\n"); + gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n"); goto unmap_src; } @@ -2733,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) guest_gma, guest_gma + ctx_size, map); if (ret) { - gvt_err("fail to copy guest indirect ctx\n"); + gvt_vgpu_err("fail to copy guest indirect ctx\n"); goto unmap_src; } @@ -2767,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { int ret; + struct intel_vgpu *vgpu = wa_ctx->workload->vgpu; if (wa_ctx->indirect_ctx.size == 0) 
return 0; ret = shadow_indirect_ctx(wa_ctx); if (ret) { - gvt_err("fail to shadow indirect ctx\n"); + gvt_vgpu_err("fail to shadow indirect ctx\n"); return ret; } @@ -2781,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) ret = scan_wa_ctx(wa_ctx); if (ret) { - gvt_err("scan wa ctx error\n"); + gvt_vgpu_err("scan wa ctx error\n"); return ret; } diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h index 68cba7bd980a..b0cff4dc2684 100644 --- a/drivers/gpu/drm/i915/gvt/debug.h +++ b/drivers/gpu/drm/i915/gvt/debug.h @@ -27,6 +27,14 @@ #define gvt_err(fmt, args...) \ DRM_ERROR("gvt: "fmt, ##args) +#define gvt_vgpu_err(fmt, args...) \ +do { \ + if (IS_ERR_OR_NULL(vgpu)) \ + DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \ + else \ + DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\ +} while (0) + #define gvt_dbg_core(fmt, args...) \ DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args) diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index bda85dff7b2a..f1648fe5e5ea 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) unsigned char chr = 0; if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) { - gvt_err("Driver tries to read EDID without proper sequence!\n"); + gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n"); return 0; } if (edid->current_edid_read >= EDID_SIZE) { - gvt_err("edid_get_byte() exceeds the size of EDID!\n"); + gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n"); return 0; } if (!edid->edid_available) { - gvt_err("Reading EDID but EDID is not available!\n"); + gvt_vgpu_err("Reading EDID but EDID is not available!\n"); return 0; } @@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) chr = edid_data->edid_block[edid->current_edid_read]; edid->current_edid_read++; } else { - gvt_err("No EDID available during the reading?\n"); + gvt_vgpu_err("No EDID available during the reading?\n"); } return chr; } @@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE; break; default: - gvt_err("Unknown/reserved GMBUS cycle detected!\n"); + gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n"); break; } /* @@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, */ } else { memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); - gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n", - vgpu->id); + gvt_vgpu_err("warning: gmbus3 read with nothing returned\n"); } return 0; } diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 46eb9fd3c03f..f1f426a97aa9 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out( struct intel_vgpu_execlist *execlist, struct execlist_ctx_descriptor_format *ctx) { + struct intel_vgpu *vgpu = execlist->vgpu; struct intel_vgpu_execlist_slot *running = execlist->running_slot; struct intel_vgpu_execlist_slot *pending = execlist->pending_slot; struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0]; @@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out( gvt_dbg_el("schedule out context id %x\n", ctx->context_id); if (WARN_ON(!same_context(ctx, execlist->running_context))) { - gvt_err("schedule out context is not running context," + 
gvt_vgpu_err("schedule out context is not running context," "ctx id %x running ctx id %x\n", ctx->context_id, execlist->running_context->context_id); @@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot( status.udw = vgpu_vreg(vgpu, status_reg + 4); if (status.execlist_queue_full) { - gvt_err("virtual execlist slots are full\n"); + gvt_vgpu_err("virtual execlist slots are full\n"); return NULL; } @@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist, struct execlist_ctx_descriptor_format *ctx0, *ctx1; struct execlist_context_status_format status; + struct intel_vgpu *vgpu = execlist->vgpu; gvt_dbg_el("emulate schedule-in\n"); if (!slot) { - gvt_err("no available execlist slot\n"); + gvt_vgpu_err("no available execlist slot\n"); return -EINVAL; } @@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); if (IS_ERR(vma)) { - gvt_err("Cannot pin\n"); return; } @@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, 0, CACHELINE_BYTES, 0); if (IS_ERR(vma)) { - gvt_err("Cannot pin indirect ctx obj\n"); return; } @@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload) { struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; struct intel_vgpu_mm *mm; + struct intel_vgpu *vgpu = workload->vgpu; int page_table_level; u32 pdp[8]; @@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload) } else if (desc->addressing_mode == 3) { /* legacy 64 bit */ page_table_level = 4; } else { - gvt_err("Advanced Context mode(SVM) is not supported!\n"); + gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n"); return -EINVAL; } @@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload) mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT, pdp, page_table_level, 0); if (IS_ERR(mm)) { - gvt_err("fail to create mm object.\n"); + gvt_vgpu_err("fail to create mm object.\n"); return PTR_ERR(mm); } } @@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT)); if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_err("invalid guest context LRCA: %x\n", desc->lrca); + gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca); return -EINVAL; } @@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) continue; if (!desc[i]->privilege_access) { - gvt_err("vgpu%d: unexpected GGTT elsp submission\n", - vgpu->id); + gvt_vgpu_err("unexpected GGTT elsp submission\n"); return -EINVAL; } @@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) } if (!valid_desc_bitmap) { - gvt_err("vgpu%d: no valid desc in a elsp submission\n", - vgpu->id); + gvt_vgpu_err("no valid desc in a elsp submission\n"); return -EINVAL; } if (!test_bit(0, (void *)&valid_desc_bitmap) && test_bit(1, (void *)&valid_desc_bitmap)) { - gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n", - vgpu->id); + gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n"); return -EINVAL; } @@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) ret = submit_context(vgpu, ring_id, &valid_desc[i], emulate_schedule_in); if (ret) { - gvt_err("vgpu%d: fail to schedule 
workload\n", - vgpu->id); + gvt_vgpu_err("fail to schedule workload\n"); return ret; } emulate_schedule_in = false; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 6a5ff23ded90..da7312715824 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size) { if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) { - gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n", - vgpu->id, addr, size); + gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n", + addr, size); return false; } return true; @@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p, mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); if (mfn == INTEL_GVT_INVALID_ADDR) { - gvt_err("fail to translate gfn: 0x%lx\n", gfn); + gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn); return -ENXIO; } @@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu, daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); if (dma_mapping_error(kdev, daddr)) { - gvt_err("fail to map dma addr\n"); + gvt_vgpu_err("fail to map dma addr\n"); return -EINVAL; } @@ -735,7 +735,7 @@ retry: if (reclaim_one_mm(vgpu->gvt)) goto retry; - gvt_err("fail to allocate ppgtt shadow page\n"); + gvt_vgpu_err("fail to allocate ppgtt shadow page\n"); return ERR_PTR(-ENOMEM); } @@ -750,14 +750,14 @@ retry: */ ret = init_shadow_page(vgpu, &spt->shadow_page, type); if (ret) { - gvt_err("fail to initialize shadow page for spt\n"); + gvt_vgpu_err("fail to initialize shadow page for spt\n"); goto err; } ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page, gfn, ppgtt_write_protection_handler, NULL); if (ret) { - gvt_err("fail to initialize guest page for spt\n"); + gvt_vgpu_err("fail to initialize guest page for spt\n"); goto err; } @@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page( if (p) return shadow_page_to_ppgtt_spt(p); - gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n", - vgpu->id, mfn); + gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn); return NULL; } @@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu, } s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); if (!s) { - gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n", - vgpu->id, ops->get_pfn(e)); + gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n", + ops->get_pfn(e)); return -ENXIO; } return ppgtt_invalidate_shadow_page(s); @@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu, static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) { + struct intel_vgpu *vgpu = spt->vgpu; struct intel_gvt_gtt_entry e; unsigned long index; int ret; @@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) for_each_present_shadow_entry(spt, &e, index) { if (!gtt_type_is_pt(get_next_pt_type(e.type))) { - gvt_err("GVT doesn't support pse bit for now\n"); + gvt_vgpu_err("GVT doesn't support pse bit for now\n"); return -EINVAL; } ret = ppgtt_invalidate_shadow_page_by_shadow_entry( @@ -868,8 +868,8 @@ release: ppgtt_free_shadow_page(spt); return 0; fail: - gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n", - spt->vgpu->id, spt, e.val64, e.type); + gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n", + spt, e.val64, e.type); return 
ret; } @@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry( } return s; fail: - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", - vgpu->id, s, we->val64, we->type); + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", + s, we->val64, we->type); return ERR_PTR(ret); } @@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) for_each_present_guest_entry(spt, &ge, i) { if (!gtt_type_is_pt(get_next_pt_type(ge.type))) { - gvt_err("GVT doesn't support pse bit now\n"); + gvt_vgpu_err("GVT doesn't support pse bit now\n"); ret = -EINVAL; goto fail; } @@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) } return 0; fail: - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", - vgpu->id, spt, ge.val64, ge.type); + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", + spt, ge.val64, ge.type); return ret; } @@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt, struct intel_vgpu_ppgtt_spt *s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e)); if (!s) { - gvt_err("fail to find guest page\n"); + gvt_vgpu_err("fail to find guest page\n"); ret = -ENXIO; goto fail; } @@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt, ppgtt_set_shadow_entry(spt, &e, index); return 0; fail: - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", - vgpu->id, spt, e.val64, e.type); + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", + spt, e.val64, e.type); return ret; } @@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt, } return 0; fail: - gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id, - spt, we->val64, we->type); + gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n", + spt, we->val64, we->type); return ret; } @@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table( } return 0; fail: - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n", - vgpu->id, spt, we->val64, we->type); + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n", + spt, we->val64, we->type); return ret; } @@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm) spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); if (IS_ERR(spt)) { - gvt_err("fail to populate guest root pointer\n"); + gvt_vgpu_err("fail to populate guest root pointer\n"); ret = PTR_ERR(spt); goto fail; } @@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, ret = gtt->mm_alloc_page_table(mm); if (ret) { - gvt_err("fail to allocate page table for mm\n"); + gvt_vgpu_err("fail to allocate page table for mm\n"); goto fail; } @@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, } return mm; fail: - gvt_err("fail to create mm\n"); + gvt_vgpu_err("fail to create mm\n"); if (mm) intel_gvt_mm_unreference(mm); return ERR_PTR(ret); @@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma) mm->page_table_level, gma, gpa); return gpa; err: - gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma); + gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma); return INTEL_GVT_INVALID_ADDR; } @@ -1836,8 +1836,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, if (ops->test_present(&e)) { ret = gtt_entry_p2m(vgpu, &e, &m); 
if (ret) { - gvt_err("vgpu%d: fail to translate guest gtt entry\n", - vgpu->id); + gvt_vgpu_err("fail to translate guest gtt entry\n"); return ret; } } else { @@ -1893,14 +1892,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); if (!scratch_pt) { - gvt_err("fail to allocate scratch page\n"); + gvt_vgpu_err("fail to allocate scratch page\n"); return -ENOMEM; } daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096, PCI_DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, daddr)) { - gvt_err("fail to dmamap scratch_pt\n"); + gvt_vgpu_err("fail to dmamap scratch_pt\n"); __free_page(virt_to_page(scratch_pt)); return -ENOMEM; } @@ -2003,7 +2002,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, NULL, 1, 0); if (IS_ERR(ggtt_mm)) { - gvt_err("fail to create mm for ggtt.\n"); + gvt_vgpu_err("fail to create mm for ggtt.\n"); return PTR_ERR(ggtt_mm); } @@ -2076,7 +2075,6 @@ static int setup_spt_oos(struct intel_gvt *gvt) for (i = 0; i < preallocated_oos_pages; i++) { oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL); if (!oos_page) { - gvt_err("fail to pre-allocate oos page\n"); ret = -ENOMEM; goto fail; } @@ -2166,7 +2164,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu, mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, pdp, page_table_level, 0); if (IS_ERR(mm)) { - gvt_err("fail to create mm\n"); + gvt_vgpu_err("fail to create mm\n"); return PTR_ERR(mm); } } @@ -2196,7 +2194,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp); if (!mm) { - gvt_err("fail to find ppgtt instance.\n"); + gvt_vgpu_err("fail to find ppgtt instance.\n"); return -EINVAL; } intel_gvt_mm_unreference(mm); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index de975f40aebf..eaff45d417e8 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -181,11 +181,9 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); if (!vgpu->mmio.disable_warn_untrack) { - gvt_err("vgpu%d: found oob fence register access\n", - vgpu->id); - gvt_err("vgpu%d: total fence %d, access fence %d\n", - vgpu->id, vgpu_fence_sz(vgpu), - fence_num); + gvt_vgpu_err("found oob fence register access\n"); + gvt_vgpu_err("total fence %d, access fence %d\n", + vgpu_fence_sz(vgpu), fence_num); } memset(p_data, 0, bytes); return -EINVAL; @@ -249,7 +247,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, break; default: /*should not hit here*/ - gvt_err("invalid forcewake offset 0x%x\n", offset); + gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset); return -EINVAL; } } else { @@ -530,7 +528,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu, fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2; fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK; } else { - gvt_err("Invalid train pattern %d\n", train_pattern); + gvt_vgpu_err("Invalid train pattern %d\n", train_pattern); return -EINVAL; } @@ -588,7 +586,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu, else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX) index = FDI_RX_IMR_TO_PIPE(offset); else { - gvt_err("Unsupport registers %x\n", offset); + gvt_vgpu_err("Unsupport registers %x\n", offset); return -EINVAL; } @@ -818,7 +816,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, u32 data; if (!dpy_is_valid_port(port_index)) { - gvt_err("GVT(%d): Unsupported 
DP port access!\n", vgpu->id); + gvt_vgpu_err("Unsupported DP port access!\n"); return 0; } @@ -1016,8 +1014,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu, if (i == num) { if (num == SBI_REG_MAX) { - gvt_err("vgpu%d: SBI caching meets maximum limits\n", - vgpu->id); + gvt_vgpu_err("SBI caching meets maximum limits\n"); return; } display->sbi.number++; @@ -1097,7 +1094,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, break; } if (invalid_read) - gvt_err("invalid pvinfo read: [%x:%x] = %x\n", + gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n", offset, bytes, *(u32 *)p_data); vgpu->pv_notified = true; return 0; @@ -1125,7 +1122,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) case 1: /* Remove this in guest driver. */ break; default: - gvt_err("Invalid PV notification %d\n", notification); + gvt_vgpu_err("Invalid PV notification %d\n", notification); } return ret; } @@ -1181,7 +1178,7 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); break; default: - gvt_err("invalid pvinfo write offset %x bytes %x data %x\n", + gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n", offset, bytes, data); break; } @@ -1415,7 +1412,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, if (execlist->elsp_dwords.index == 3) { ret = intel_vgpu_submit_execlist(vgpu, ring_id); if(ret) - gvt_err("fail submit workload on ring %d\n", ring_id); + gvt_vgpu_err("fail submit workload on ring %d\n", + ring_id); } ++execlist->elsp_dwords.index; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 84d801638ede..cd218b07c6f6 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info, static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) { - struct intel_vgpu *vgpu; + struct intel_vgpu *vgpu = NULL; struct intel_vgpu_type *type; struct device *pdev; void *gvt; @@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); if (!type) { - gvt_err("failed to find type %s to create\n", + gvt_vgpu_err("failed to find type %s to create\n", kobject_name(kobj)); ret = -EINVAL; goto out; @@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) vgpu = intel_gvt_ops->vgpu_create(gvt, type); if (IS_ERR_OR_NULL(vgpu)) { ret = vgpu == NULL ? 
-EFAULT : PTR_ERR(vgpu); - gvt_err("failed to create intel vgpu: %d\n", ret); + gvt_vgpu_err("failed to create intel vgpu: %d\n", ret); goto out; } @@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev) ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, &vgpu->vdev.iommu_notifier); if (ret != 0) { - gvt_err("vfio_register_notifier for iommu failed: %d\n", ret); + gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n", + ret); goto out; } @@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev) ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, &vgpu->vdev.group_notifier); if (ret != 0) { - gvt_err("vfio_register_notifier for group failed: %d\n", ret); + gvt_vgpu_err("vfio_register_notifier for group failed: %d\n", + ret); goto undo_iommu; } @@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, if (index >= VFIO_PCI_NUM_REGIONS) { - gvt_err("invalid index: %u\n", index); + gvt_vgpu_err("invalid index: %u\n", index); return -EINVAL; } @@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, case VFIO_PCI_VGA_REGION_INDEX: case VFIO_PCI_ROM_REGION_INDEX: default: - gvt_err("unsupported region: %u\n", index); + gvt_vgpu_err("unsupported region: %u\n", index); } return ret == 0 ? count : ret; @@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu, trigger = eventfd_ctx_fdget(fd); if (IS_ERR(trigger)) { - gvt_err("eventfd_ctx_fdget failed\n"); + gvt_vgpu_err("eventfd_ctx_fdget failed\n"); return PTR_ERR(trigger); } vgpu->vdev.msi_trigger = trigger; @@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, ret = vfio_set_irqs_validate_and_prepare(&hdr, max, VFIO_PCI_NUM_IRQS, &data_size); if (ret) { - gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); + gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); return -EINVAL; } if (data_size) { @@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev) kvm = vgpu->vdev.kvm; if (!kvm || kvm->mm != current->mm) { - gvt_err("KVM is required to use Intel vGPU\n"); + gvt_vgpu_err("KVM is required to use Intel vGPU\n"); return -ESRCH; } @@ -1337,8 +1339,10 @@ static int kvmgt_guest_init(struct mdev_device *mdev) static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) { + struct intel_vgpu *vgpu = info->vgpu; + if (!info) { - gvt_err("kvmgt_guest_info invalid\n"); + gvt_vgpu_err("kvmgt_guest_info invalid\n"); return false; } @@ -1383,12 +1387,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) unsigned long iova, pfn; struct kvmgt_guest_info *info; struct device *dev; + struct intel_vgpu *vgpu; int rc; if (!handle_valid(handle)) return INTEL_GVT_INVALID_ADDR; info = (struct kvmgt_guest_info *)handle; + vgpu = info->vgpu; iova = gvt_cache_find(info->vgpu, gfn); if (iova != INTEL_GVT_INVALID_ADDR) return iova; @@ -1397,13 +1403,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) dev = mdev_dev(info->vgpu->vdev.mdev); rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); if (rc != 1) { - gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); + gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", + gfn, rc); return INTEL_GVT_INVALID_ADDR; } /* transfer to host iova for GFX to use DMA */ rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); if (rc) { - gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); + 
gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); vfio_unpin_pages(dev, &gfn, 1); return INTEL_GVT_INVALID_ADDR; } diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 60b698cb8365..1ba3bdb09341 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -142,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes); if (ret) { - gvt_err("vgpu%d: guest page read error %d, " + gvt_vgpu_err("guest page read error %d, " "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", - vgpu->id, ret, - gp->gfn, pa, *(u32 *)p_data, bytes); + ret, gp->gfn, pa, *(u32 *)p_data, + bytes); } mutex_unlock(&gvt->lock); return ret; @@ -200,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); if (!vgpu->mmio.disable_warn_untrack) { - gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", - vgpu->id, offset, bytes, *(u32 *)p_data); + gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n", + offset, bytes, *(u32 *)p_data); if (offset == 0x206c) { - gvt_err("------------------------------------------\n"); - gvt_err("vgpu%d: likely triggers a gfx reset\n", - vgpu->id); - gvt_err("------------------------------------------\n"); + gvt_vgpu_err("------------------------------------------\n"); + gvt_vgpu_err("likely triggers a gfx reset\n"); + gvt_vgpu_err("------------------------------------------\n"); vgpu->mmio.disable_warn_untrack = true; } } @@ -220,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, mutex_unlock(&gvt->lock); return 0; err: - gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n", - vgpu->id, offset, bytes); + gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n", + offset, bytes); mutex_unlock(&gvt->lock); return ret; } @@ -259,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, if (gp) { ret = gp->handler(gp, pa, p_data, bytes); if (ret) { - gvt_err("vgpu%d: guest page write error %d, " - "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", - vgpu->id, ret, - gp->gfn, pa, *(u32 *)p_data, bytes); + gvt_err("guest page write error %d, " + "gfn 0x%lx, pa 0x%llx, " + "var 0x%x, len %d\n", + ret, gp->gfn, pa, + *(u32 *)p_data, bytes); } mutex_unlock(&gvt->lock); return ret; @@ -329,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, /* all register bits are RO. 
*/ if (ro_mask == ~(u64)0) { - gvt_err("vgpu%d: try to write RO reg %x\n", - vgpu->id, offset); + gvt_vgpu_err("try to write RO reg %x\n", + offset); ret = 0; goto out; } @@ -360,8 +360,8 @@ out: mutex_unlock(&gvt->lock); return 0; err: - gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n", - vgpu->id, offset, bytes); + gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset, + bytes); mutex_unlock(&gvt->lock); return ret; } diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 5d1caf9daba9..311799136d7f 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -67,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va + i * PAGE_SIZE); if (mfn == INTEL_GVT_INVALID_ADDR) { - gvt_err("fail to get MFN from VA\n"); + gvt_vgpu_err("fail to get MFN from VA\n"); return -EINVAL; } ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, vgpu_opregion(vgpu)->gfn[i], mfn, 1, map); if (ret) { - gvt_err("fail to map GFN to MFN, errno: %d\n", ret); + gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n", + ret); return ret; } } @@ -287,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM; if (!(swsci & SWSCI_SCI_SELECT)) { - gvt_err("vgpu%d: requesting SMI service\n", vgpu->id); + gvt_vgpu_err("requesting SMI service\n"); return 0; } /* ignore non 0->1 trasitions */ @@ -300,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) func = GVT_OPREGION_FUNC(*scic); subfunc = GVT_OPREGION_SUBFUNC(*scic); if (!querying_capabilities(*scic)) { - gvt_err("vgpu%d: requesting runtime service: func \"%s\"," + gvt_vgpu_err("requesting runtime service: func \"%s\"," " subfunc \"%s\"\n", - vgpu->id, opregion_func_name(func), opregion_subfunc_name(subfunc)); /* diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index 73f052a4f424..95ee091ce085 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c @@ -167,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) I915_WRITE_FW(reg, 0x1); if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) - gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id); + gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id); else vgpu_vreg(vgpu, regs[ring_id]) = 0; diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index d3a56c949025..d29e4352d529 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) (u32)((workload->ctx_desc.lrca + i) << GTT_PAGE_SHIFT)); if (context_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_err("Invalid guest context descriptor\n"); + gvt_vgpu_err("Invalid guest context descriptor\n"); return -EINVAL; } @@ -176,6 +176,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; struct drm_i915_gem_request *rq; + struct intel_vgpu *vgpu = workload->vgpu; int ret; gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", @@ -189,7 +190,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); if 
(IS_ERR(rq)) { - gvt_err("fail to allocate gem request\n"); + gvt_vgpu_err("fail to allocate gem request\n"); ret = PTR_ERR(rq); goto out; } @@ -322,7 +323,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) (u32)((workload->ctx_desc.lrca + i) << GTT_PAGE_SHIFT)); if (context_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_err("invalid guest context descriptor\n"); + gvt_vgpu_err("invalid guest context descriptor\n"); return; } @@ -417,6 +418,7 @@ static int workload_thread(void *priv) int ring_id = p->ring_id; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload = NULL; + struct intel_vgpu *vgpu = NULL; int ret; bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); DEFINE_WAIT_FUNC(wait, woken_wake_function); @@ -459,7 +461,8 @@ static int workload_thread(void *priv) mutex_unlock(&gvt->lock); if (ret) { - gvt_err("fail to dispatch workload, skip\n"); + vgpu = workload->vgpu; + gvt_vgpu_err("fail to dispatch workload, skip\n"); goto complete; } -- cgit v1.2.3 From 3f765a341798ebd4e0ece7cce34399a8fd4a7f9f Mon Sep 17 00:00:00 2001 From: Yulei Zhang Date: Mon, 13 Mar 2017 23:21:27 +0800 Subject: drm/i915/gvt: correct the ggtt valid bit check in pipe control command GGTT valid bit in pipe control command move to DWORD1 after SNB, so change the valid check code correspondingly. v2: per Zhenyu's comment, replace the bit check with MACRO define PIPE_CONTROL_GLOBAL_GTT_IVB Signed-off-by: Yulei Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 2ca0506d8d8c..2b92cc8a7d1a 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1030,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s) ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl"); else if (post_sync == 1) { /* check ggtt*/ - if ((cmd_val(s, 2) & (1 << 2))) { + if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) { gma = cmd_val(s, 2) & GENMASK(31, 3); if (gmadr_bytes == 8) gma |= (cmd_gma_hi(s, 3)) << 32; -- cgit v1.2.3 From 3dce2aca02929f180ab66171b333fa48fe485a03 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Mar 2017 22:08:08 +0000 Subject: drm/i915/gvt: Remove bogus retry around i915_wait_request commit 8f1117abb408 ("drm/i915/gvt: handle workload lifecycle properly") includes some nonsense to retry a indefinite wait - i915_wait_request() does not return until the request is completed when used from an uninterruptible context. Fixes: 8f1117abb408 ("drm/i915/gvt: handle workload lifecycle properly" Signed-off-by: Chris Wilson Cc: Chuanxiao Dong Cc: Zhenyu Wang Cc: Zhi Wang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/scheduler.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index d29e4352d529..dd8f8cc69a76 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -468,19 +468,7 @@ static int workload_thread(void *priv) gvt_dbg_sched("ring id %d wait workload %p\n", workload->ring_id, workload); -retry: - i915_wait_request(workload->req, - 0, MAX_SCHEDULE_TIMEOUT); - /* I915 has replay mechanism and a request will be replayed - * if there is i915 reset. So the seqno will be updated anyway. 
- * If the seqno is not updated yet after waiting, which means - * the replay may still be in progress and we can wait again. - */ - if (!i915_gem_request_completed(workload->req)) { - gvt_dbg_sched("workload %p not completed, wait again\n", - workload); - goto retry; - } + i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT); complete: gvt_dbg_sched("will complete workload %p, status: %d\n", -- cgit v1.2.3 From cf2135ca3d50d6468e9216fef3d0d33c31af635b Mon Sep 17 00:00:00 2001 From: Chuanxiao Dong Date: Thu, 9 Mar 2017 23:07:12 +0800 Subject: drm/i915/gvt: add enable_execlists check before enable gvt The GVT-g needs execlists to be enabled otherwise gvt should be disabled. Add a check for enable_execlists before enabling gvt. v2: use DRM_INFO in response to the user action Signed-off-by: Chuanxiao Dong Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/intel_gvt.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index d23c0fcff751..8c04eca84351 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -77,6 +77,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) goto bail; } + if (!i915.enable_execlists) { + DRM_INFO("GPU guest virtualisation [GVT-g] disabled due to disabled execlist submission [i915.enable_execlists module parameter]\n"); + goto bail; + } + /* * We're not in host or fail to find a MPT module, disable GVT-g */ -- cgit v1.2.3 From 5180edc2421117766fcb9c2d2dc6bfaeefdeb709 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Thu, 16 Mar 2017 09:45:09 +0800 Subject: drm/i915/kvmgt: fix suspicious rcu dereference usage The srcu read lock must be held while accessing kvm memslots. This patch fix below warning for function kvmgt_rw_gpa(). [ 165.345093] [ ERR: suspicious RCU usage. 
] [ 165.416538] Call Trace: [ 165.418989] dump_stack+0x85/0xc2 [ 165.422310] lockdep_rcu_suspicious+0xd7/0x110 [ 165.426769] kvm_read_guest_page+0x195/0x1b0 [kvm] [ 165.431574] kvm_read_guest+0x50/0x90 [kvm] [ 165.440492] kvmgt_rw_gpa+0x43/0xa0 [kvmgt] [ 165.444683] kvmgt_read_gpa+0x11/0x20 [kvmgt] [ 165.449061] gtt_get_entry64+0x4d/0xc0 [i915] [ 165.453438] ppgtt_populate_shadow_page_by_guest_entry+0x380/0xdc0 [i915] [ 165.460254] shadow_mm+0xd1/0x460 [i915] [ 165.472488] intel_vgpu_create_mm+0x1ab/0x210 [i915] [ 165.477472] intel_vgpu_g2v_create_ppgtt_mm+0x5f/0xc0 [i915] [ 165.483154] pvinfo_mmio_write+0x19b/0x1d0 [i915] [ 165.499068] intel_vgpu_emulate_mmio_write+0x3f9/0x600 [i915] [ 165.504827] intel_vgpu_rw+0x114/0x150 [kvmgt] [ 165.509281] intel_vgpu_write+0x16f/0x1a0 [kvmgt] [ 165.513993] vfio_mdev_write+0x20/0x30 [vfio_mdev] [ 165.518793] vfio_device_fops_write+0x24/0x30 [vfio] [ 165.523770] __vfs_write+0x28/0x120 [ 165.540529] vfs_write+0xce/0x1f0 v2: fix Cc format for stable Signed-off-by: Changbin Du Cc: # v4.10+ Reviewed-by: Xiao Guangrong Reviewed-by: Jike Song Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index cd218b07c6f6..1ea3eb270de8 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1424,7 +1424,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, { struct kvmgt_guest_info *info; struct kvm *kvm; - int ret; + int idx, ret; bool kthread = current->mm == NULL; if (!handle_valid(handle)) @@ -1436,8 +1436,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, if (kthread) use_mm(kvm->mm); + idx = srcu_read_lock(&kvm->srcu); ret = write ? kvm_write_guest(kvm, gpa, buf, len) : kvm_read_guest(kvm, gpa, buf, len); + srcu_read_unlock(&kvm->srcu, idx); if (kthread) unuse_mm(kvm->mm); -- cgit v1.2.3 From 17f1b1a6d4e5f41df161eb2a90af22c003a243fc Mon Sep 17 00:00:00 2001 From: Tina Zhang Date: Wed, 15 Mar 2017 23:16:01 -0400 Subject: drm/i915/gvt: scan shadow indirect context image when valid The shadow indirect context image should be only scanned when valid. So far, Only RCS ring has the shadow indirect context image. This patch limits the scan logic only for RCS ring. v2. refine description of the subject v3. fix alignment. (Zhenyu) Signed-off-by: Tina Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/scheduler.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index dd8f8cc69a76..907e6bc794f6 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -203,9 +203,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) if (ret) goto out; - ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); - if (ret) - goto out; + if ((workload->ring_id == RCS) && + (workload->wa_ctx.indirect_ctx.size != 0)) { + ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); + if (ret) + goto out; + } ret = populate_shadow_context(workload); if (ret) -- cgit v1.2.3 From 3cd23b828b37bd5ccf4b40132f12dbf20d7afdb6 Mon Sep 17 00:00:00 2001 From: Chuanxiao Dong Date: Thu, 16 Mar 2017 09:47:58 +0800 Subject: drm/i915/gvt: GVT pin/unpin shadow context When handling guest request, GVT needs to populate/update shadow_ctx with guest context. 
This behavior needs to make sure the shadow_ctx is pinned. The current implementation is relying on i195 allocate request to pin but this way cannot guarantee the i915 not to unpin the shadow_ctx when GVT update the guest context from shadow_ctx. So GVT should pin/unpin the shadow_ctx by itself. Signed-off-by: Chuanxiao Dong Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/scheduler.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 907e6bc794f6..39a83eb7aecc 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -175,6 +175,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) int ring_id = workload->ring_id; struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; + struct intel_engine_cs *engine = dev_priv->engine[ring_id]; struct drm_i915_gem_request *rq; struct intel_vgpu *vgpu = workload->vgpu; int ret; @@ -188,6 +189,21 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) mutex_lock(&dev_priv->drm.struct_mutex); + /* pin shadow context by gvt even the shadow context will be pinned + * when i915 alloc request. That is because gvt will update the guest + * context from shadow context when workload is completed, and at that + * moment, i915 may already unpined the shadow context to make the + * shadow_ctx pages invalid. So gvt need to pin itself. After update + * the guest context, gvt can unpin the shadow_ctx safely. + */ + ret = engine->context_pin(engine, shadow_ctx); + if (ret) { + gvt_vgpu_err("fail to pin shadow context\n"); + workload->status = ret; + mutex_unlock(&dev_priv->drm.struct_mutex); + return ret; + } + rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); if (IS_ERR(rq)) { gvt_vgpu_err("fail to allocate gem request\n"); @@ -231,6 +247,9 @@ out: if (!IS_ERR_OR_NULL(rq)) i915_add_request_no_flush(rq); + else + engine->context_unpin(engine, shadow_ctx); + mutex_unlock(&dev_priv->drm.struct_mutex); return ret; } @@ -380,6 +399,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) * For the workload w/o request, directly complete the workload. */ if (workload->req) { + struct drm_i915_private *dev_priv = + workload->vgpu->gvt->dev_priv; + struct intel_engine_cs *engine = + dev_priv->engine[workload->ring_id]; wait_event(workload->shadow_ctx_status_wq, !atomic_read(&workload->shadow_ctx_active)); @@ -392,6 +415,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) INTEL_GVT_EVENT_MAX) intel_vgpu_trigger_virtual_event(vgpu, event); } + mutex_lock(&dev_priv->drm.struct_mutex); + /* unpin shadow ctx as the shadow_ctx update is done */ + engine->context_unpin(engine, workload->vgpu->shadow_ctx); + mutex_unlock(&dev_priv->drm.struct_mutex); } gvt_dbg_sched("ring id %d complete workload %p status %d\n", -- cgit v1.2.3 From 2958b9013fcbabeeba221161d0712f5259f1e15d Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 17 Mar 2017 12:11:20 +0800 Subject: drm/i915/gvt: Fix gvt scheduler interval time Fix to correctly assign 1ms for gvt scheduler interval time, as previous code using HZ is pretty broken. And use no delay for start gvt scheduler function. 
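The breakage is plain integer arithmetic: (1 * HZ / 1000) truncates to 0 jiffies on any kernel built with HZ below 1000 (HZ=100, HZ=250 and HZ=300 are all common configurations), so the scheduler time slice silently became zero, while msecs_to_jiffies(1) rounds up and never returns less than one jiffy. A throwaway userspace sketch of the arithmetic (the helper below only approximates msecs_to_jiffies() for illustration):

  #include <stdio.h>

  /* Emulate the two macro forms for a few common HZ configurations.
   * msecs_to_jiffies() is approximated by its round-up definition here;
   * the real kernel helper is more involved.
   */
  static unsigned long old_time_slice(unsigned long hz)
  {
          return 1 * hz / 1000;            /* truncates to 0 for HZ < 1000 */
  }

  static unsigned long msecs_to_jiffies_approx(unsigned long ms, unsigned long hz)
  {
          return (ms * hz + 999) / 1000;   /* rounds up, never 0 for ms >= 1 */
  }

  int main(void)
  {
          unsigned long hz_values[] = { 100, 250, 300, 1000 };
          unsigned int i;

          for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++)
                  printf("HZ=%-4lu  1*HZ/1000 = %lu jiffies, msecs_to_jiffies(1) ~ %lu jiffies\n",
                         hz_values[i], old_time_slice(hz_values[i]),
                         msecs_to_jiffies_approx(1, hz_values[i]));
          return 0;
  }
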
Fixes: 4b63960ebd3f ("drm/i915/gvt: vGPU schedule policy framework") Cc: Zhi Wang Cc: stable@vger.kernel.org # v4.10+ Acked-by: Chuanxiao Dong Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/sched_policy.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 06c9584ac5f0..34b9acdf3479 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -101,7 +101,7 @@ struct tbs_sched_data { struct list_head runq_head; }; -#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000) +#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1)) static void tbs_sched_func(struct work_struct *work) { @@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) return; list_add_tail(&vgpu_data->list, &sched_data->runq_head); - schedule_delayed_work(&sched_data->work, sched_data->period); + schedule_delayed_work(&sched_data->work, 0); } static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) -- cgit v1.2.3 From cf8c73afb3abf0f8905efbaddd4ce11a0deec9da Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 17 Mar 2017 10:22:51 +0800 Subject: drm/amd/amdgpu: add POLARIS12 PCI ID Signed-off-by: Evan Quan Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f7adbace428a..b76cd699eb0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -421,6 +421,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, + {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0, 0, 0} -- cgit v1.2.3 From e7348396c6d51b57c95c6646c390cd078e038e19 Mon Sep 17 00:00:00 2001 From: Masaki Ota Date: Fri, 17 Mar 2017 14:10:57 -0700 Subject: Input: ALPS - fix V8+ protocol handling (73 03 28) Devices identified as E7="73 03 28" use slightly modified version of V8 protocol, with lower count per electrode, different offsets, and different feature bits in OTP data. 
Fixes: aeaa881f9b17 ("Input: ALPS - set DualPoint flag for 74 03 28 devices") Signed-off-by: Masaki Ota Acked-by: Pali Rohar Tested-by: Paul Donohue Tested-by: Nick Fletcher Cc: stable@vger.kernel.org Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/alps.c | 66 +++++++++++++++++++++++++++++++++++----------- drivers/input/mouse/alps.h | 11 ++++++++ 2 files changed, 61 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 72b28ebfe360..262d9b620fcf 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -2462,14 +2462,34 @@ static int alps_update_device_area_ss4_v2(unsigned char otp[][4], int num_y_electrode; int x_pitch, y_pitch, x_phys, y_phys; - num_x_electrode = SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F); - num_y_electrode = SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F); + if (IS_SS4PLUS_DEV(priv->dev_id)) { + num_x_electrode = + SS4PLUS_NUMSENSOR_XOFFSET + (otp[0][2] & 0x0F); + num_y_electrode = + SS4PLUS_NUMSENSOR_YOFFSET + ((otp[0][2] >> 4) & 0x0F); - priv->x_max = (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE; - priv->y_max = (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE; + priv->x_max = + (num_x_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE; + priv->y_max = + (num_y_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE; - x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM; - y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM; + x_pitch = (otp[0][1] & 0x0F) + SS4PLUS_MIN_PITCH_MM; + y_pitch = ((otp[0][1] >> 4) & 0x0F) + SS4PLUS_MIN_PITCH_MM; + + } else { + num_x_electrode = + SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F); + num_y_electrode = + SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F); + + priv->x_max = + (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE; + priv->y_max = + (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE; + + x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM; + y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM; + } x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */ y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */ @@ -2485,7 +2505,10 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4], { unsigned char is_btnless; - is_btnless = (otp[1][1] >> 3) & 0x01; + if (IS_SS4PLUS_DEV(priv->dev_id)) + is_btnless = (otp[1][0] >> 1) & 0x01; + else + is_btnless = (otp[1][1] >> 3) & 0x01; if (is_btnless) priv->flags |= ALPS_BUTTONPAD; @@ -2493,6 +2516,21 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4], return 0; } +static int alps_update_dual_info_ss4_v2(unsigned char otp[][4], + struct alps_data *priv) +{ + bool is_dual = false; + + if (IS_SS4PLUS_DEV(priv->dev_id)) + is_dual = (otp[0][0] >> 4) & 0x01; + + if (is_dual) + priv->flags |= ALPS_DUALPOINT | + ALPS_DUALPOINT_WITH_PRESSURE; + + return 0; +} + static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, struct alps_data *priv) { @@ -2508,6 +2546,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, alps_update_btn_info_ss4_v2(otp, priv); + alps_update_dual_info_ss4_v2(otp, priv); + return 0; } @@ -2753,10 +2793,6 @@ static int alps_set_protocol(struct psmouse *psmouse, if (alps_set_defaults_ss4_v2(psmouse, priv)) return -EIO; - if (priv->fw_ver[1] == 0x1) - priv->flags |= ALPS_DUALPOINT | - ALPS_DUALPOINT_WITH_PRESSURE; - break; } @@ -2827,10 +2863,7 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) ec[2] >= 0x90 && ec[2] <= 0x9d) { protocol = &alps_v3_protocol_data; } else if (e7[0] == 0x73 && e7[1] == 0x03 && 
- e7[2] == 0x14 && ec[1] == 0x02) { - protocol = &alps_v8_protocol_data; - } else if (e7[0] == 0x73 && e7[1] == 0x03 && - e7[2] == 0x28 && ec[1] == 0x01) { + (e7[2] == 0x14 || e7[2] == 0x28)) { protocol = &alps_v8_protocol_data; } else { psmouse_dbg(psmouse, @@ -2840,7 +2873,8 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) } if (priv) { - /* Save the Firmware version */ + /* Save Device ID and Firmware version */ + memcpy(priv->dev_id, e7, 3); memcpy(priv->fw_ver, ec, 3); error = alps_set_protocol(psmouse, priv, protocol); if (error) diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index 6d279aa27cb9..4334f2805d93 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -54,6 +54,16 @@ enum SS4_PACKET_ID { #define SS4_MASK_NORMAL_BUTTONS 0x07 +#define SS4PLUS_COUNT_PER_ELECTRODE 128 +#define SS4PLUS_NUMSENSOR_XOFFSET 16 +#define SS4PLUS_NUMSENSOR_YOFFSET 5 +#define SS4PLUS_MIN_PITCH_MM 37 + +#define IS_SS4PLUS_DEV(_b) (((_b[0]) == 0x73) && \ + ((_b[1]) == 0x03) && \ + ((_b[2]) == 0x28) \ + ) + #define SS4_IS_IDLE_V2(_b) (((_b[0]) == 0x18) && \ ((_b[1]) == 0x10) && \ ((_b[2]) == 0x00) && \ @@ -283,6 +293,7 @@ struct alps_data { int addr_command; u16 proto_version; u8 byte0, mask0; + u8 dev_id[3]; u8 fw_ver[3]; int flags; int x_max; -- cgit v1.2.3 From 47e6fb4212d09f325c0847d05985dd3d71553095 Mon Sep 17 00:00:00 2001 From: Masaki Ota Date: Fri, 17 Mar 2017 14:19:40 -0700 Subject: Input: ALPS - fix trackstick button handling on V8 devices Alps stick devices always have physical buttons, so we should not check ALPS_BUTTONPAD flag to decide whether we should report them. Fixes: 4777ac220c43 ("Input: ALPS - add touchstick support for SS5 hardware") Signed-off-by: Masaki Ota Acked-by: Pali Rohar Tested-by: Paul Donohue Tested-by: Nick Fletcher Cc: stable@vger.kernel.org Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/alps.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 262d9b620fcf..f210e19ddba6 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1282,10 +1282,8 @@ static int alps_decode_ss4_v2(struct alps_fields *f, /* handle buttons */ if (pkt_id == SS4_PACKET_ID_STICK) { f->ts_left = !!(SS4_BTN_V2(p) & 0x01); - if (!(priv->flags & ALPS_BUTTONPAD)) { - f->ts_right = !!(SS4_BTN_V2(p) & 0x02); - f->ts_middle = !!(SS4_BTN_V2(p) & 0x04); - } + f->ts_right = !!(SS4_BTN_V2(p) & 0x02); + f->ts_middle = !!(SS4_BTN_V2(p) & 0x04); } else { f->left = !!(SS4_BTN_V2(p) & 0x01); if (!(priv->flags & ALPS_BUTTONPAD)) { -- cgit v1.2.3 From 7de32556dfc62b9e1203730cc26b71292da8a244 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sat, 18 Mar 2017 00:57:39 +0100 Subject: cpufreq: intel_pstate: One set of global limits in active mode In the active mode intel_pstate currently uses two sets of global limits, each associated with one of the possible scaling_governor settings in that mode: "powersave" or "performance". The driver switches over from one of those sets to the other depending on the scaling_governor setting for the last CPU whose per-policy cpufreq interface in sysfs was last used to change parameters exposed in there. That obviously leads to no end of issues when the scaling_governor settings differ between CPUs. 
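As a deliberately simplified illustration of that failure mode (a userspace sketch only; the structure and function names below are invented, not the driver's internals): with two limit sets selected through a single global pointer, whichever policy was written last dictates the limits applied to every CPU.

  #include <stdio.h>
  #include <string.h>

  /* Toy model of "one global limits pointer flipped by the last writer".
   * All names are illustrative.
   */
  struct limits {
          int min_pct;
          int max_pct;
  };

  static struct limits performance = { 100, 100 };
  static struct limits powersave   = {   0, 100 };
  static struct limits *active     = &powersave;

  static void write_scaling_governor(int cpu, const char *gov)
  {
          /* last write wins for *every* CPU, which is the problem */
          active = strcmp(gov, "performance") ? &powersave : &performance;
          printf("cpu%d scaling_governor <- %s\n", cpu, gov);
  }

  int main(void)
  {
          write_scaling_governor(0, "powersave");
          write_scaling_governor(1, "performance");

          /* cpu0 asked for powersave but is now bounded by performance limits */
          printf("limits now applied to all CPUs: min=%d%% max=%d%%\n",
                 active->min_pct, active->max_pct);
          return 0;
  }
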
The most recent issue was introduced by commit a240c4aa5d0f (cpufreq: intel_pstate: Do not reinit performance limits in ->setpolicy) that eliminated the reinitialization of "performance" limits in intel_pstate_set_policy() preventing the max limit from being set to anything below 100, among other things. Namely, an undesirable side effect of commit a240c4aa5d0f is that now, after setting scaling_governor to "performance" in the active mode, the per-policy limits for the CPU in question go to the highest level and stay there even when it is switched back to "powersave" later. As it turns out, some distributions set scaling_governor to "performance" temporarily for all CPUs to speed-up system initialization, so that change causes them to misbehave later. To fix that, get rid of the performance/powersave global limits split and use just one set of global limits for everything. From the user's persepctive, after this modification, when scaling_governor is switched from "performance" to "powersave" or the other way around on one CPU, the limits settings (ie. the global max/min_perf_pct and per-policy scaling_max/min_freq for any CPUs) will not change. Still, switching from "performance" to "powersave" or the other way around changes the way in which P-states are selected and in particular "performance" causes the driver to always request the highest P-state it is allowed to ask for for the given CPU. Fixes: a240c4aa5d0f (cpufreq: intel_pstate: Do not reinit performance limits in ->setpolicy) Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 142 +++++++++++++---------------------------- 1 file changed, 46 insertions(+), 96 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 08e134ffba68..7b07803e7042 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -364,9 +364,7 @@ static bool driver_registered __read_mostly; static bool acpi_ppc; #endif -static struct perf_limits performance_limits; -static struct perf_limits powersave_limits; -static struct perf_limits *limits; +static struct perf_limits global; static void intel_pstate_init_limits(struct perf_limits *limits) { @@ -377,14 +375,6 @@ static void intel_pstate_init_limits(struct perf_limits *limits) limits->max_sysfs_pct = 100; } -static void intel_pstate_set_performance_limits(struct perf_limits *limits) -{ - intel_pstate_init_limits(limits); - limits->min_perf_pct = 100; - limits->min_perf = int_ext_tofp(1); - limits->min_sysfs_pct = 100; -} - static DEFINE_MUTEX(intel_pstate_driver_lock); static DEFINE_MUTEX(intel_pstate_limits_lock); @@ -507,7 +497,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) * correct max turbo frequency based on the turbo state. * Also need to convert to MHz as _PSS freq is in MHz. 
*/ - if (!limits->turbo_disabled) + if (!global.turbo_disabled) cpu->acpi_perf_data.states[0].core_frequency = policy->cpuinfo.max_freq / 1000; cpu->valid_pss_table = true; @@ -626,7 +616,7 @@ static inline void update_turbo_state(void) cpu = all_cpu_data[0]; rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); - limits->turbo_disabled = + global.turbo_disabled = (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); } @@ -851,7 +841,7 @@ static struct freq_attr *hwp_cpufreq_attrs[] = { static void intel_pstate_hwp_set(struct cpufreq_policy *policy) { int min, hw_min, max, hw_max, cpu; - struct perf_limits *perf_limits = limits; + struct perf_limits *perf_limits = &global; u64 value, cap; for_each_cpu(cpu, policy->cpus) { @@ -863,19 +853,22 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy) rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); hw_min = HWP_LOWEST_PERF(cap); - if (limits->no_turbo) + if (global.no_turbo) hw_max = HWP_GUARANTEED_PERF(cap); else hw_max = HWP_HIGHEST_PERF(cap); - min = fp_ext_toint(hw_max * perf_limits->min_perf); + max = fp_ext_toint(hw_max * perf_limits->max_perf); + if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) + min = max; + else + min = fp_ext_toint(hw_max * perf_limits->min_perf); rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); value &= ~HWP_MIN_PERF(~0L); value |= HWP_MIN_PERF(min); - max = fp_ext_toint(hw_max * perf_limits->max_perf); value &= ~HWP_MAX_PERF(~0L); value |= HWP_MAX_PERF(max); @@ -968,20 +961,11 @@ static int intel_pstate_resume(struct cpufreq_policy *policy) } static void intel_pstate_update_policies(void) - __releases(&intel_pstate_limits_lock) - __acquires(&intel_pstate_limits_lock) { - struct perf_limits *saved_limits = limits; int cpu; - mutex_unlock(&intel_pstate_limits_lock); - for_each_possible_cpu(cpu) cpufreq_update_policy(cpu); - - mutex_lock(&intel_pstate_limits_lock); - - limits = saved_limits; } /************************** debugfs begin ************************/ @@ -1060,7 +1044,7 @@ static void intel_pstate_debug_hide_params(void) static ssize_t show_##file_name \ (struct kobject *kobj, struct attribute *attr, char *buf) \ { \ - return sprintf(buf, "%u\n", limits->object); \ + return sprintf(buf, "%u\n", global.object); \ } static ssize_t intel_pstate_show_status(char *buf); @@ -1151,10 +1135,10 @@ static ssize_t show_no_turbo(struct kobject *kobj, } update_turbo_state(); - if (limits->turbo_disabled) - ret = sprintf(buf, "%u\n", limits->turbo_disabled); + if (global.turbo_disabled) + ret = sprintf(buf, "%u\n", global.turbo_disabled); else - ret = sprintf(buf, "%u\n", limits->no_turbo); + ret = sprintf(buf, "%u\n", global.no_turbo); mutex_unlock(&intel_pstate_driver_lock); @@ -1181,19 +1165,19 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, mutex_lock(&intel_pstate_limits_lock); update_turbo_state(); - if (limits->turbo_disabled) { + if (global.turbo_disabled) { pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); mutex_unlock(&intel_pstate_limits_lock); mutex_unlock(&intel_pstate_driver_lock); return -EPERM; } - limits->no_turbo = clamp_t(int, input, 0, 1); - - intel_pstate_update_policies(); + global.no_turbo = clamp_t(int, input, 0, 1); mutex_unlock(&intel_pstate_limits_lock); + intel_pstate_update_policies(); + mutex_unlock(&intel_pstate_driver_lock); return count; @@ -1218,19 +1202,16 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, mutex_lock(&intel_pstate_limits_lock); - limits->max_sysfs_pct = clamp_t(int, 
input, 0 , 100); - limits->max_perf_pct = min(limits->max_policy_pct, - limits->max_sysfs_pct); - limits->max_perf_pct = max(limits->min_policy_pct, - limits->max_perf_pct); - limits->max_perf_pct = max(limits->min_perf_pct, - limits->max_perf_pct); - limits->max_perf = percent_ext_fp(limits->max_perf_pct); - - intel_pstate_update_policies(); + global.max_sysfs_pct = clamp_t(int, input, 0 , 100); + global.max_perf_pct = min(global.max_policy_pct, global.max_sysfs_pct); + global.max_perf_pct = max(global.min_policy_pct, global.max_perf_pct); + global.max_perf_pct = max(global.min_perf_pct, global.max_perf_pct); + global.max_perf = percent_ext_fp(global.max_perf_pct); mutex_unlock(&intel_pstate_limits_lock); + intel_pstate_update_policies(); + mutex_unlock(&intel_pstate_driver_lock); return count; @@ -1255,19 +1236,16 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, mutex_lock(&intel_pstate_limits_lock); - limits->min_sysfs_pct = clamp_t(int, input, 0 , 100); - limits->min_perf_pct = max(limits->min_policy_pct, - limits->min_sysfs_pct); - limits->min_perf_pct = min(limits->max_policy_pct, - limits->min_perf_pct); - limits->min_perf_pct = min(limits->max_perf_pct, - limits->min_perf_pct); - limits->min_perf = percent_ext_fp(limits->min_perf_pct); - - intel_pstate_update_policies(); + global.min_sysfs_pct = clamp_t(int, input, 0 , 100); + global.min_perf_pct = max(global.min_policy_pct, global.min_sysfs_pct); + global.min_perf_pct = min(global.max_policy_pct, global.min_perf_pct); + global.min_perf_pct = min(global.max_perf_pct, global.min_perf_pct); + global.min_perf = percent_ext_fp(global.min_perf_pct); mutex_unlock(&intel_pstate_limits_lock); + intel_pstate_update_policies(); + mutex_unlock(&intel_pstate_driver_lock); return count; @@ -1387,7 +1365,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate) u32 vid; val = (u64)pstate << 8; - if (limits->no_turbo && !limits->turbo_disabled) + if (global.no_turbo && !global.turbo_disabled) val |= (u64)1 << 32; vid_fp = cpudata->vid.min + mul_fp( @@ -1557,7 +1535,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate) u64 val; val = (u64)pstate << 8; - if (limits->no_turbo && !limits->turbo_disabled) + if (global.no_turbo && !global.turbo_disabled) val |= (u64)1 << 32; return val; @@ -1683,9 +1661,9 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) int max_perf = cpu->pstate.turbo_pstate; int max_perf_adj; int min_perf; - struct perf_limits *perf_limits = limits; + struct perf_limits *perf_limits = &global; - if (limits->no_turbo || limits->turbo_disabled) + if (global.no_turbo || global.turbo_disabled) max_perf = cpu->pstate.max_pstate; if (per_cpu_limits) @@ -1820,7 +1798,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) sample->busy_scaled = busy_frac * 100; - target = limits->no_turbo || limits->turbo_disabled ? + target = global.no_turbo || global.turbo_disabled ? 
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; target += target >> 2; target = mul_fp(target, busy_frac); @@ -2116,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, static int intel_pstate_set_policy(struct cpufreq_policy *policy) { struct cpudata *cpu; - struct perf_limits *perf_limits = NULL; + struct perf_limits *perf_limits = &global; if (!policy->cpuinfo.max_freq) return -ENODEV; @@ -2139,21 +2117,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) mutex_lock(&intel_pstate_limits_lock); - if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { - pr_debug("set performance\n"); - if (!perf_limits) { - limits = &performance_limits; - perf_limits = limits; - } - } else { - pr_debug("set powersave\n"); - if (!perf_limits) { - limits = &powersave_limits; - perf_limits = limits; - } - - } - intel_pstate_update_perf_limits(policy, perf_limits); if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { @@ -2177,16 +2140,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) static int intel_pstate_verify_policy(struct cpufreq_policy *policy) { struct cpudata *cpu = all_cpu_data[policy->cpu]; - struct perf_limits *perf_limits; - - if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) - perf_limits = &performance_limits; - else - perf_limits = &powersave_limits; update_turbo_state(); - policy->cpuinfo.max_freq = perf_limits->turbo_disabled || - perf_limits->no_turbo ? + policy->cpuinfo.max_freq = global.turbo_disabled || global.no_turbo ? cpu->pstate.max_freq : cpu->pstate.turbo_freq; @@ -2201,9 +2157,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy) unsigned int max_freq, min_freq; max_freq = policy->cpuinfo.max_freq * - perf_limits->max_sysfs_pct / 100; + global.max_sysfs_pct / 100; min_freq = policy->cpuinfo.max_freq * - perf_limits->min_sysfs_pct / 100; + global.min_sysfs_pct / 100; cpufreq_verify_within_limits(policy, min_freq, max_freq); } @@ -2255,7 +2211,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; update_turbo_state(); - policy->cpuinfo.max_freq = limits->turbo_disabled ? + policy->cpuinfo.max_freq = global.turbo_disabled ? cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; policy->cpuinfo.max_freq *= cpu->pstate.scaling; @@ -2275,7 +2231,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) return ret; policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100) + if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE)) policy->policy = CPUFREQ_POLICY_PERFORMANCE; else policy->policy = CPUFREQ_POLICY_POWERSAVE; @@ -2301,7 +2257,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) struct cpudata *cpu = all_cpu_data[policy->cpu]; update_turbo_state(); - policy->cpuinfo.max_freq = limits->turbo_disabled ? + policy->cpuinfo.max_freq = global.turbo_disabled ? cpu->pstate.max_freq : cpu->pstate.turbo_freq; cpufreq_verify_within_cpu_limits(policy); @@ -2317,7 +2273,7 @@ static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu, update_turbo_state(); - max_freq = limits->no_turbo || limits->turbo_disabled ? + max_freq = global.no_turbo || global.turbo_disabled ? 
cpu->pstate.max_freq : cpu->pstate.turbo_freq; policy->cpuinfo.max_freq = max_freq; if (policy->max > max_freq) @@ -2425,13 +2381,7 @@ static int intel_pstate_register_driver(void) { int ret; - intel_pstate_init_limits(&powersave_limits); - intel_pstate_set_performance_limits(&performance_limits); - if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) && - intel_pstate_driver == &intel_pstate) - limits = &performance_limits; - else - limits = &powersave_limits; + intel_pstate_init_limits(&global); ret = cpufreq_register_driver(intel_pstate_driver); if (ret) { -- cgit v1.2.3 From 436ecf5519d892397af133a79ccd38a17c25fa51 Mon Sep 17 00:00:00 2001 From: Bjørn Mork Date: Fri, 17 Mar 2017 17:21:28 +0100 Subject: USB: serial: qcserial: add Dell DW5811e MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a Dell branded Sierra Wireless EM7455. Cc: Signed-off-by: Bjørn Mork Signed-off-by: Johan Hovold --- drivers/usb/serial/qcserial.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 696458db7e3c..38b3f0d8cd58 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -169,6 +169,8 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ + {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ + {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ /* Huawei devices */ {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ -- cgit v1.2.3 From 9c28ca4ff8bad7486182291a55b4f67a70af718d Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 8 Mar 2017 00:09:59 -0800 Subject: target: Drop pointless tfo->check_stop_free check All in-tree fabric drivers provide a tfo->check_stop_free(), so there is no need to do the extra check within existing transport_cmd_check_stop_to_fabric() code. Just to be sure, add a check in target_fabric_tf_ops_check() to notify any out-of-tree drivers that might be missing it. 
Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_configfs.c | 4 ++++ drivers/target/target_core_transport.c | 3 +-- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 54b36c9835be..38b5025e4c7a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -421,6 +421,10 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) pr_err("Missing tfo->aborted_task()\n"); return -EINVAL; } + if (!tfo->check_stop_free) { + pr_err("Missing tfo->check_stop_free()\n"); + return -EINVAL; + } /* * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 434d9d693989..b1a3cdb29468 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -636,8 +636,7 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) * Fabric modules are expected to return '1' here if the se_cmd being * passed is released at this point, or zero if not being released. */ - return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd) - : 0; + return cmd->se_tfo->check_stop_free(cmd); } static void transport_lun_remove_cmd(struct se_cmd *cmd) -- cgit v1.2.3 From 3abaa2bfdb1e6bb33d38a2e82cf3bb82ec0197bf Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 1 Mar 2017 23:14:39 -0600 Subject: tcmu: allow hw_max_sectors greater than 128 tcmu hard codes the hw_max_sectors to 128 which is a litle small. Userspace uses the max_sectors to report the optimal IO size and some initiators perform better with larger IOs (open-iscsi seems to do better with 256 to 512 depending on the test). 
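A rough userspace analogue of the option parsing that this patch factors into tcmu_set_dev_attrib() (the option string, variable names and values below are illustrative only, not the kernel code):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* Parse "name=value" options and reject zero values, roughly mirroring
   * what the shared helper does.  Purely illustrative.
   */
  static int set_dev_attrib(const char *arg, unsigned int *attrib)
  {
          char *end;
          unsigned long val = strtoul(arg, &end, 0);

          if (end == arg || *end != '\0' || val == 0) {
                  fprintf(stderr, "dev attrib must be a nonzero number: '%s'\n", arg);
                  return -1;
          }
          *attrib = (unsigned int)val;
          return 0;
  }

  int main(void)
  {
          unsigned int hw_block_size = 512, hw_max_sectors = 128;
          char opts[] = "hw_block_size=4096,hw_max_sectors=256";
          char *tok;

          for (tok = strtok(opts, ","); tok; tok = strtok(NULL, ",")) {
                  char *eq = strchr(tok, '=');

                  if (!eq)
                          continue;
                  *eq = '\0';
                  if (!strcmp(tok, "hw_block_size"))
                          set_dev_attrib(eq + 1, &hw_block_size);
                  else if (!strcmp(tok, "hw_max_sectors"))
                          set_dev_attrib(eq + 1, &hw_max_sectors);
          }
          printf("hw_block_size=%u hw_max_sectors=%u\n",
                 hw_block_size, hw_max_sectors);
          return 0;
  }
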
(Fix do not display hw max sectors twice - MNC) Signed-off-by: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_user.c | 54 +++++++++++++++++++++++++-------------- 1 file changed, 35 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index c3adefe95e50..24e8580f07b8 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -960,7 +960,8 @@ static int tcmu_configure_device(struct se_device *dev) if (dev->dev_attrib.hw_block_size == 0) dev->dev_attrib.hw_block_size = 512; /* Other attributes can be configured in userspace */ - dev->dev_attrib.hw_max_sectors = 128; + if (!dev->dev_attrib.hw_max_sectors) + dev->dev_attrib.hw_max_sectors = 128; dev->dev_attrib.hw_queue_depth = 128; ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, @@ -1031,16 +1032,42 @@ static void tcmu_free_device(struct se_device *dev) } enum { - Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err, + Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, + Opt_err, }; static match_table_t tokens = { {Opt_dev_config, "dev_config=%s"}, {Opt_dev_size, "dev_size=%u"}, {Opt_hw_block_size, "hw_block_size=%u"}, + {Opt_hw_max_sectors, "hw_max_sectors=%u"}, {Opt_err, NULL} }; +static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) +{ + unsigned long tmp_ul; + char *arg_p; + int ret; + + arg_p = match_strdup(arg); + if (!arg_p) + return -ENOMEM; + + ret = kstrtoul(arg_p, 0, &tmp_ul); + kfree(arg_p); + if (ret < 0) { + pr_err("kstrtoul() failed for dev attrib\n"); + return ret; + } + if (!tmp_ul) { + pr_err("dev attrib must be nonzero\n"); + return -EINVAL; + } + *dev_attrib = tmp_ul; + return 0; +} + static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, const char *page, ssize_t count) { @@ -1048,7 +1075,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, char *orig, *ptr, *opts, *arg_p; substring_t args[MAX_OPT_ARGS]; int ret = 0, token; - unsigned long tmp_ul; opts = kstrdup(page, GFP_KERNEL); if (!opts) @@ -1082,22 +1108,12 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, pr_err("kstrtoul() failed for dev_size=\n"); break; case Opt_hw_block_size: - arg_p = match_strdup(&args[0]); - if (!arg_p) { - ret = -ENOMEM; - break; - } - ret = kstrtoul(arg_p, 0, &tmp_ul); - kfree(arg_p); - if (ret < 0) { - pr_err("kstrtoul() failed for hw_block_size=\n"); - break; - } - if (!tmp_ul) { - pr_err("hw_block_size must be nonzero\n"); - break; - } - dev->dev_attrib.hw_block_size = tmp_ul; + ret = tcmu_set_dev_attrib(&args[0], + &(dev->dev_attrib.hw_block_size)); + break; + case Opt_hw_max_sectors: + ret = tcmu_set_dev_attrib(&args[0], + &(dev->dev_attrib.hw_max_sectors)); break; default: break; -- cgit v1.2.3 From 2579325ca0acc598fdf41ba12b2871d3467f28df Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 1 Mar 2017 23:14:40 -0600 Subject: tcmu: return on first Opt parse failure We only were returing failure if the last opt to be parsed failed. This has a return failure when we first detect a failure. 
Signed-off-by: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_user.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 24e8580f07b8..4339ab2133b3 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1118,6 +1118,9 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, default: break; } + + if (ret) + break; } kfree(orig); -- cgit v1.2.3 From 530c6891b1220cba780b6c18f4691d85a3435080 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 1 Mar 2017 23:13:24 -0600 Subject: target: allow ALUA setup for some passthrough backends This patch allows passthrough backends to use the core/base LIO ALUA setup and state checks, but still handle the execution of commands. This will allow the target_core_user module to execute STPG and RTPG in userspace, and not have to duplicate the ALUA state checks, path information (needed so we can check if command is executable on specific paths) and setup (rtslib sets/updates the configfs ALUA interface like it does for iblock or file). For STPG, the target_core_user userspace daemon, tcmu-runner will still execute the STPG, and to update the core/base LIO state it will use the existing configfs interface. For RTPG, tcmu-runner will loop over configfs and/or cache the state. Signed-off-by: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_alua.c | 9 +++++---- drivers/target/target_core_pscsi.c | 3 ++- drivers/target/target_core_tpg.c | 3 ++- include/target/target_core_backend.h | 7 ++++++- 4 files changed, 15 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index f5e330099bfc..a41bbb8087cf 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -691,7 +691,7 @@ target_alua_state_check(struct se_cmd *cmd) if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) return 0; - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) return 0; /* @@ -1973,7 +1973,7 @@ ssize_t core_alua_store_tg_pt_gp_info( unsigned char buf[TG_PT_GROUP_NAME_BUF]; int move = 0; - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH || + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) return -ENODEV; @@ -2230,7 +2230,7 @@ ssize_t core_alua_store_offline_bit( unsigned long tmp; int ret; - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH || + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) return -ENODEV; @@ -2316,7 +2316,8 @@ ssize_t core_alua_store_secondary_write_metadata( int core_setup_alua(struct se_device *dev) { - if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && + if (!(dev->transport->transport_flags & + TRANSPORT_FLAG_PASSTHROUGH_ALUA) && !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { struct t10_alua_lu_gp_member *lu_gp_mem; diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 44d92f23a3f0..94cda7991e80 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -1080,7 +1080,8 @@ static void pscsi_req_done(struct request *req, int uptodate) static const struct target_backend_ops pscsi_ops = { .name 
= "pscsi", .owner = THIS_MODULE, - .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, + .transport_flags = TRANSPORT_FLAG_PASSTHROUGH | + TRANSPORT_FLAG_PASSTHROUGH_ALUA, .attach_hba = pscsi_attach_hba, .detach_hba = pscsi_detach_hba, .pmode_enable_hba = pscsi_pmode_enable_hba, diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index c0dbfa016575..6fb191914f45 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -602,7 +602,8 @@ int core_tpg_add_lun( if (ret) goto out_kill_ref; - if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && + if (!(dev->transport->transport_flags & + TRANSPORT_FLAG_PASSTHROUGH_ALUA) && !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index b54b98dc2d4a..1b0f447ce850 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -4,7 +4,12 @@ #include #include -#define TRANSPORT_FLAG_PASSTHROUGH 1 +#define TRANSPORT_FLAG_PASSTHROUGH 0x1 +/* + * ALUA commands, state checks and setup operations are handled by the + * backend module. + */ +#define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2 struct request_queue; struct scatterlist; -- cgit v1.2.3 From 0a4145729871ef29afe8b0c57560a1f5bd736416 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 1 Mar 2017 23:13:25 -0600 Subject: target: fail ALUA transitions for pscsi We do not setup the LU group for pscsi devices, so if you write a state to alua_access_state that will cause a transition you will get a NULL pointer dereference. This patch will fail attempts to try and transition the path for backend devices that set the TRANSPORT_FLAG_PASSTHROUGH_ALUA flag. Signed-off-by: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_alua.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index a41bbb8087cf..5b5a1e250a65 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -1149,6 +1149,9 @@ int core_alua_do_port_transition( struct t10_alua_tg_pt_gp *tg_pt_gp; int primary, valid_states, rc = 0; + if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) + return -ENODEV; + valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; if (core_alua_check_transition(new_state, valid_states, &primary) != 0) return -EINVAL; -- cgit v1.2.3 From 207ee84133c00a8a2a5bdec94df4a5b37d78881c Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 1 Mar 2017 23:13:26 -0600 Subject: target: Use system workqueue for ALUA transitions If tcmu-runner is processing a STPG and needs to change the kernel's ALUA state then we cannot use the same work queue for task management requests and ALUA transitions, because we could deadlock. The problem occurs when a STPG times out before tcmu-runner is able to call into target_tg_pt_gp_alua_access_state_store-> core_alua_do_port_transition -> core_alua_do_transition_tg_pt -> queue_work. In this case, the tmr is on the work queue waiting for the STPG to complete, but the STPG transition is now queued behind the waiting tmr. Note: This bug will also be fixed by this patch: http://www.spinics.net/lists/target-devel/msg14560.html which switches the tmr code to use the system workqueues. 
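The ordering problem described above can be modelled in a few lines of plain C (a userspace analogue only, nothing here is the kernel workqueue API): a single worker drains a FIFO queue, the job at the head blocks waiting on a job queued behind it, and that second job can never run.

  #include <stdio.h>
  #include <unistd.h>

  /* One worker, strict FIFO: the "TMR" job is queued first and waits for
   * the "transition" job, which cannot run until the TMR job returns.
   * Bounded so the demo terminates instead of hanging.
   */
  static int transition_done;

  static void tmr_job(void)
  {
          int i;

          for (i = 0; i < 3; i++) {
                  if (transition_done) {
                          puts("TMR: transition finished");
                          return;
                  }
                  puts("TMR: waiting for the ALUA transition...");
                  sleep(1);
          }
          puts("TMR: gave up, the transition is queued behind this job");
  }

  static void transition_job(void)
  {
          transition_done = 1;
  }

  int main(void)
  {
          void (*queue[])(void) = { tmr_job, transition_job };
          unsigned int i;

          for (i = 0; i < 2; i++)         /* single worker draining the queue */
                  queue[i]();
          return 0;
  }

Queuing the transition on a different workqueue, as this patch does, removes that dependency.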
For both, I am not sure if we need a dedicated workqueue since it is not a performance path and I do not think we need WQ_MEM_RECLAIM to make forward progress to free up memory like the block layer does. Signed-off-by: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_alua.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 5b5a1e250a65..58bf5e6350ac 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -1121,13 +1121,11 @@ static int core_alua_do_transition_tg_pt( unsigned long transition_tmo; transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ; - queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, - &tg_pt_gp->tg_pt_gp_transition_work, - transition_tmo); + schedule_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work, + transition_tmo); } else { tg_pt_gp->tg_pt_gp_transition_complete = &wait; - queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, - &tg_pt_gp->tg_pt_gp_transition_work, 0); + schedule_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work, 0); wait_for_completion(&wait); tg_pt_gp->tg_pt_gp_transition_complete = NULL; } -- cgit v1.2.3 From d7175373f2745ed4abe5b388d5aabd06304f801e Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 2 Mar 2017 04:59:48 -0600 Subject: target: fix ALUA transition timeout handling The implicit transition time tells initiators the min time to wait before timing out a transition. We currently schedule the transition to occur in tg_pt_gp_implicit_trans_secs seconds so there is no room for delays. If core_alua_do_transition_tg_pt_work->core_alua_update_tpg_primary_metadata needs to write out info to a remote file, then the initiator can easily time out the operation. 
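The scheduling change is easier to see in isolation. Below is a minimal sketch of the plain-work versus delayed-work pattern; the demo_* names are invented for illustration and this is not the target code itself, just the idiom the fix moves to, assuming the slow step (such as writing transition metadata) lives in the work function:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_transition {
	struct work_struct work;		/* was a struct delayed_work */
	unsigned int implicit_trans_secs;	/* grace period advertised to initiators */
};

static void demo_do_transition(struct work_struct *work)
{
	struct demo_transition *t =
		container_of(work, struct demo_transition, work);

	/*
	 * Slow steps now run inside the implicit_trans_secs window
	 * instead of starting only after that window has expired.
	 */
	(void)t;
}

static void demo_init(struct demo_transition *t)
{
	INIT_WORK(&t->work, demo_do_transition);
}

static void demo_start_transition(struct demo_transition *t)
{
	/*
	 * Old pattern: schedule_delayed_work(&dwork, implicit_trans_secs * HZ),
	 * which burned the whole grace period before doing any work.
	 * New pattern: start immediately; callers that must wait can
	 * flush_work(&t->work).
	 */
	schedule_work(&t->work);
}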
Signed-off-by: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_alua.c | 23 ++++++++--------------- include/target/target_core_base.h | 2 +- 2 files changed, 9 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 58bf5e6350ac..594807cd92cb 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -1013,7 +1013,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp) static void core_alua_do_transition_tg_pt_work(struct work_struct *work) { struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, - struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); + struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work); struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); @@ -1076,13 +1076,12 @@ static int core_alua_do_transition_tg_pt( /* * Flush any pending transitions */ - if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs && - atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == + if (!explicit && atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == ALUA_ACCESS_STATE_TRANSITION) { /* Just in case */ tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; tg_pt_gp->tg_pt_gp_transition_complete = &wait; - flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); + flush_work(&tg_pt_gp->tg_pt_gp_transition_work); wait_for_completion(&wait); tg_pt_gp->tg_pt_gp_transition_complete = NULL; return 0; @@ -1117,15 +1116,9 @@ static int core_alua_do_transition_tg_pt( atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); - if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { - unsigned long transition_tmo; - - transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ; - schedule_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work, - transition_tmo); - } else { + schedule_work(&tg_pt_gp->tg_pt_gp_transition_work); + if (explicit) { tg_pt_gp->tg_pt_gp_transition_complete = &wait; - schedule_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work, 0); wait_for_completion(&wait); tg_pt_gp->tg_pt_gp_transition_complete = NULL; } @@ -1696,8 +1689,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); - INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work, - core_alua_do_transition_tg_pt_work); + INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work, + core_alua_do_transition_tg_pt_work); tg_pt_gp->tg_pt_gp_dev = dev; atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); @@ -1805,7 +1798,7 @@ void core_alua_free_tg_pt_gp( dev->t10_alua.alua_tg_pt_gps_counter--; spin_unlock(&dev->t10_alua.tg_pt_gps_lock); - flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); + flush_work(&tg_pt_gp->tg_pt_gp_transition_work); /* * Allow a struct t10_alua_tg_pt_gp_member * referenced by diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 37c274e61acc..4b784b6e21c0 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -299,7 +299,7 @@ struct t10_alua_tg_pt_gp { struct list_head tg_pt_gp_lun_list; struct se_lun *tg_pt_gp_alua_lun; struct se_node_acl *tg_pt_gp_alua_nacl; - struct delayed_work tg_pt_gp_transition_work; + struct work_struct tg_pt_gp_transition_work; struct completion 
*tg_pt_gp_transition_complete; }; -- cgit v1.2.3 From 1ca4d4fa3bfcbe8964f81e5818a9b90436466eb0 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 2 Mar 2017 04:59:49 -0600 Subject: target: allow userspace to set state to transitioning Userspace target_core_user handlers like tcmu-runner may want to set the ALUA state to transitioning while it does implicit transitions. This patch allows that state when set from configfs. Signed-off-by: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_alua.c | 37 ++++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 594807cd92cb..252d4e4b7b33 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -43,7 +43,7 @@ #include "target_core_ua.h" static sense_reason_t core_alua_check_transition(int state, int valid, - int *primary); + int *primary, int explicit); static int core_alua_set_tg_pt_secondary_state( struct se_lun *lun, int explicit, int offline); @@ -335,8 +335,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) * the state is a primary or secondary target port asymmetric * access state. */ - rc = core_alua_check_transition(alua_access_state, - valid_states, &primary); + rc = core_alua_check_transition(alua_access_state, valid_states, + &primary, 1); if (rc) { /* * If the SET TARGET PORT GROUPS attempts to establish @@ -762,7 +762,7 @@ target_alua_state_check(struct se_cmd *cmd) * Check implicit and explicit ALUA state change request. */ static sense_reason_t -core_alua_check_transition(int state, int valid, int *primary) +core_alua_check_transition(int state, int valid, int *primary, int explicit) { /* * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are @@ -804,11 +804,14 @@ core_alua_check_transition(int state, int valid, int *primary) *primary = 0; break; case ALUA_ACCESS_STATE_TRANSITION: - /* - * Transitioning is set internally, and - * cannot be selected manually. - */ - goto not_supported; + if (!(valid & ALUA_T_SUP) || explicit) + /* + * Transitioning is set internally and by tcmu daemon, + * and cannot be selected through a STPG. + */ + goto not_supported; + *primary = 0; + break; default: pr_err("Unknown ALUA access state: 0x%02x\n", state); return TCM_INVALID_PARAMETER_LIST; @@ -1070,7 +1073,7 @@ static int core_alua_do_transition_tg_pt( if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) return 0; - if (new_state == ALUA_ACCESS_STATE_TRANSITION) + if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) return -EAGAIN; /* @@ -1091,10 +1094,6 @@ static int core_alua_do_transition_tg_pt( * Save the old primary ALUA access state, and set the current state * to ALUA_ACCESS_STATE_TRANSITION. */ - tg_pt_gp->tg_pt_gp_alua_previous_state = - atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); - tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, ALUA_ACCESS_STATE_TRANSITION); tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 
@@ -1103,6 +1102,13 @@ static int core_alua_do_transition_tg_pt( core_alua_queue_state_change_ua(tg_pt_gp); + if (new_state == ALUA_ACCESS_STATE_TRANSITION) + return 0; + + tg_pt_gp->tg_pt_gp_alua_previous_state = + atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); + tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; + /* * Check for the optional ALUA primary state transition delay */ @@ -1144,7 +1150,8 @@ int core_alua_do_port_transition( return -ENODEV; valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; - if (core_alua_check_transition(new_state, valid_states, &primary) != 0) + if (core_alua_check_transition(new_state, valid_states, &primary, + explicit) != 0) return -EINVAL; local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; -- cgit v1.2.3 From 760bf578edf8122f2503a3a6a3f4b0de3b6ce0bb Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 2 Mar 2017 04:59:50 -0600 Subject: target: fix race during implicit transition work flushes This fixes the following races: 1. core_alua_do_transition_tg_pt could have read tg_pt_gp_alua_access_state and gone into this if chunk: if (!explicit && atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == ALUA_ACCESS_STATE_TRANSITION) { and then core_alua_do_transition_tg_pt_work could update the state. core_alua_do_transition_tg_pt would then only set tg_pt_gp_alua_pending_state and the tg_pt_gp_alua_access_state would not get updated with the second calls state. 2. core_alua_do_transition_tg_pt could be setting tg_pt_gp_transition_complete while the tg_pt_gp_transition_work is already completing. core_alua_do_transition_tg_pt then waits on the completion that will never be called. To handle these issues, we just call flush_work which will return when core_alua_do_transition_tg_pt_work has completed so there is no need to do the complete/wait. And, if core_alua_do_transition_tg_pt_work was running, instead of trying to sneak in the state change, we just schedule up another core_alua_do_transition_tg_pt_work call. Note that this does not handle a possible race where there are multiple threads call core_alua_do_transition_tg_pt at the same time. I think we need a mutex in target_tg_pt_gp_alua_access_state_store. Signed-off-by: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_alua.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 252d4e4b7b33..fd7c16a7ca6e 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -1079,16 +1079,8 @@ static int core_alua_do_transition_tg_pt( /* * Flush any pending transitions */ - if (!explicit && atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == - ALUA_ACCESS_STATE_TRANSITION) { - /* Just in case */ - tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; - tg_pt_gp->tg_pt_gp_transition_complete = &wait; + if (!explicit) flush_work(&tg_pt_gp->tg_pt_gp_transition_work); - wait_for_completion(&wait); - tg_pt_gp->tg_pt_gp_transition_complete = NULL; - return 0; - } /* * Save the old primary ALUA access state, and set the current state -- cgit v1.2.3 From 972c7f167974fa41ea8a2eed4b857cc59f59c42c Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 9 Mar 2017 02:42:08 -0600 Subject: tcmu: add helper to check if dev was configured This adds a helper to check if the dev was configured. It will be used in the next patch to prevent updates to some config settings after the device has been setup. 
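As a rough illustration of how such a helper gets used, here is a sketch with invented structure and function names (not tcmu's own):

#include <linux/errno.h>
#include <linux/types.h>

struct demo_dev {
	bool uio_registered;		/* set once the uio device exists */
	unsigned int cmd_time_out;	/* milliseconds */
};

static bool demo_dev_configured(const struct demo_dev *d)
{
	return d->uio_registered;
}

static int demo_set_cmd_time_out(struct demo_dev *d, unsigned int secs)
{
	/* Refuse to change the knob once the device has been configured. */
	if (demo_dev_configured(d))
		return -EINVAL;

	d->cmd_time_out = secs * 1000;
	return 0;
}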
Signed-off-by: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_user.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 4339ab2133b3..892b311e7874 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -998,6 +998,11 @@ static void tcmu_dev_call_rcu(struct rcu_head *p) kfree(udev); } +static bool tcmu_dev_configured(struct tcmu_dev *udev) +{ + return udev->uio_info.uio_dev ? true : false; +} + static void tcmu_free_device(struct se_device *dev) { struct tcmu_dev *udev = TCMU_DEV(dev); @@ -1019,8 +1024,7 @@ static void tcmu_free_device(struct se_device *dev) spin_unlock_irq(&udev->commands_lock); WARN_ON(!all_expired); - /* Device was configured */ - if (udev->uio_info.uio_dev) { + if (tcmu_dev_configured(udev)) { tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name, udev->uio_info.uio_dev->minor); -- cgit v1.2.3 From af980e46a26ac8805685bb70c8572dbc47abb126 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 9 Mar 2017 02:42:09 -0600 Subject: tcmu: make cmd timeout configurable A single daemon could implement multiple types of devices using multiple types of real devices that may not support restarting from crashes and/or handling tcmu timeouts. This makes the cmd timeout configurable, so handlers that do not support it can turn it off for now. Signed-off-by: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_user.c | 41 +++++++++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 892b311e7874..10cc15f0b1fa 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -112,6 +112,7 @@ struct tcmu_dev { spinlock_t commands_lock; struct timer_list timeout; + unsigned int cmd_time_out; char dev_config[TCMU_CONFIG_LEN]; }; @@ -172,7 +173,9 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) tcmu_cmd->se_cmd = se_cmd; tcmu_cmd->tcmu_dev = udev; - tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT); + if (udev->cmd_time_out) + tcmu_cmd->deadline = jiffies + + msecs_to_jiffies(udev->cmd_time_out); idr_preload(GFP_KERNEL); spin_lock_irq(&udev->commands_lock); @@ -451,7 +454,11 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) pr_debug("sleeping for ring space\n"); spin_unlock_irq(&udev->cmdr_lock); - ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); + if (udev->cmd_time_out) + ret = schedule_timeout( + msecs_to_jiffies(udev->cmd_time_out)); + else + ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); finish_wait(&udev->wait_cmdr, &__wait); if (!ret) { pr_warn("tcmu: command timed out\n"); @@ -526,8 +533,9 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) /* TODO: only if FLUSH and FUA?
*/ uio_event_notify(&udev->uio_info); - mod_timer(&udev->timeout, - round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT))); + if (udev->cmd_time_out) + mod_timer(&udev->timeout, round_jiffies_up(jiffies + + msecs_to_jiffies(udev->cmd_time_out))); return TCM_NO_SENSE; } @@ -742,6 +750,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) } udev->hba = hba; + udev->cmd_time_out = TCMU_TIME_OUT; init_waitqueue_head(&udev->wait_cmdr); spin_lock_init(&udev->cmdr_lock); @@ -1037,7 +1046,7 @@ static void tcmu_free_device(struct se_device *dev) enum { Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, - Opt_err, + Opt_cmd_time_out, Opt_err, }; static match_table_t tokens = { @@ -1045,6 +1054,7 @@ static match_table_t tokens = { {Opt_dev_size, "dev_size=%u"}, {Opt_hw_block_size, "hw_block_size=%u"}, {Opt_hw_max_sectors, "hw_max_sectors=%u"}, + {Opt_cmd_time_out, "cmd_time_out=%u"}, {Opt_err, NULL} }; @@ -1111,6 +1121,23 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, if (ret < 0) pr_err("kstrtoul() failed for dev_size=\n"); break; + case Opt_cmd_time_out: + if (tcmu_dev_configured(udev)) { + pr_err("Can not update cmd_time_out after device has been configured.\n"); + ret = -EINVAL; + break; + } + arg_p = match_strdup(&args[0]); + if (!arg_p) { + ret = -ENOMEM; + break; + } + ret = kstrtouint(arg_p, 0, &udev->cmd_time_out); + kfree(arg_p); + if (ret < 0) + pr_err("kstrtouint() failed for cmd_time_out=\n"); + udev->cmd_time_out *= MSEC_PER_SEC; + break; case Opt_hw_block_size: ret = tcmu_set_dev_attrib(&args[0], &(dev->dev_attrib.hw_block_size)); @@ -1138,7 +1165,9 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) bl = sprintf(b + bl, "Config: %s ", udev->dev_config[0] ? udev->dev_config : "NULL"); - bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size); + bl += sprintf(b + bl, "Size: %zu ", udev->dev_size); + bl += sprintf(b + bl, "Cmd Time Out: %lu\n", + udev->cmd_time_out / MSEC_PER_SEC); return bl; } -- cgit v1.2.3 From 7d7a743543905a8297dce53b36e793e5307da5d7 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 18 Mar 2017 15:04:13 -0700 Subject: tcmu: Convert cmd_time_out into backend device attribute Instead of putting cmd_time_out under ../target/core/user_0/foo/control, which has historically been used by parameters needed for initial backend device configuration, go ahead and move cmd_time_out into a backend device attribute. In order to do this, tcmu_module_init() has been updated to create a local struct configfs_attribute **tcmu_attrs, that is based upon the existing passthrough_attrib_attrs along with the new cmd_time_out attribute. Once **tcm_attrs has been setup, go ahead and point it at tcmu_ops->tb_dev_attrib_attrs so it's picked up by target-core. Also following MNC's previous change, ->cmd_time_out is stored in milliseconds but exposed via configfs in seconds. Also, note this patch restricts the modification of ->cmd_time_out to before + after the TCMU device has been configured, but not while it has active fabric exports. 
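The least obvious step is building the merged attribute array. The sketch below shows the same shape with invented names (it is not the tcmu code), assuming the base array is NULL-terminated:

#include <linux/slab.h>
#include <linux/string.h>

/*
 * Build "all base attributes + one extra + NULL terminator", the layout
 * the module-init path needs before registering the backend ops.
 */
static void **demo_merge_attr_array(void *const base[], void *extra)
{
	size_t n = 0;
	void **merged;

	while (base[n])
		n++;

	/* n base entries, one extra, and a NULL terminator from kcalloc(). */
	merged = kcalloc(n + 2, sizeof(*merged), GFP_KERNEL);
	if (!merged)
		return NULL;

	memcpy(merged, base, n * sizeof(*merged));
	merged[n] = extra;
	return merged;
}

In the patch, the equivalent loop copies passthrough_attrib_attrs, appends tcmu_attr_cmd_time_out, and points tcmu_ops.tb_dev_attrib_attrs at the result.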
Cc: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_user.c | 94 ++++++++++++++++++++++++++++----------- 1 file changed, 68 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 10cc15f0b1fa..c6874c38a10b 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -1046,7 +1047,7 @@ static void tcmu_free_device(struct se_device *dev) enum { Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, - Opt_cmd_time_out, Opt_err, + Opt_err, }; static match_table_t tokens = { @@ -1054,7 +1055,6 @@ static match_table_t tokens = { {Opt_dev_size, "dev_size=%u"}, {Opt_hw_block_size, "hw_block_size=%u"}, {Opt_hw_max_sectors, "hw_max_sectors=%u"}, - {Opt_cmd_time_out, "cmd_time_out=%u"}, {Opt_err, NULL} }; @@ -1121,23 +1121,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, if (ret < 0) pr_err("kstrtoul() failed for dev_size=\n"); break; - case Opt_cmd_time_out: - if (tcmu_dev_configured(udev)) { - pr_err("Can not update cmd_time_out after device has been configured.\n"); - ret = -EINVAL; - break; - } - arg_p = match_strdup(&args[0]); - if (!arg_p) { - ret = -ENOMEM; - break; - } - ret = kstrtouint(arg_p, 0, &udev->cmd_time_out); - kfree(arg_p); - if (ret < 0) - pr_err("kstrtouint() failed for cmd_time_out=\n"); - udev->cmd_time_out *= MSEC_PER_SEC; - break; case Opt_hw_block_size: ret = tcmu_set_dev_attrib(&args[0], &(dev->dev_attrib.hw_block_size)); @@ -1165,9 +1148,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) bl = sprintf(b + bl, "Config: %s ", udev->dev_config[0] ? 
udev->dev_config : "NULL"); - bl += sprintf(b + bl, "Size: %zu ", udev->dev_size); - bl += sprintf(b + bl, "Cmd Time Out: %lu\n", - udev->cmd_time_out / MSEC_PER_SEC); + bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size); return bl; } @@ -1186,7 +1167,48 @@ tcmu_parse_cdb(struct se_cmd *cmd) return passthrough_parse_cdb(cmd, tcmu_queue_cmd); } -static const struct target_backend_ops tcmu_ops = { +static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = container_of(da->da_dev, + struct tcmu_dev, se_dev); + + return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); +} + +static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = container_of(da->da_dev, + struct tcmu_dev, se_dev); + u32 val; + int ret; + + if (da->da_dev->export_count) { + pr_err("Unable to set tcmu cmd_time_out while exports exist\n"); + return -EINVAL; + } + + ret = kstrtou32(page, 0, &val); + if (ret < 0) + return ret; + + if (!val) { + pr_err("Illegal value for cmd_time_out\n"); + return -EINVAL; + } + + udev->cmd_time_out = val * MSEC_PER_SEC; + return count; +} +CONFIGFS_ATTR(tcmu_, cmd_time_out); + +static struct configfs_attribute **tcmu_attrs; + +static struct target_backend_ops tcmu_ops = { .name = "user", .owner = THIS_MODULE, .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, @@ -1200,12 +1222,12 @@ static const struct target_backend_ops tcmu_ops = { .show_configfs_dev_params = tcmu_show_configfs_dev_params, .get_device_type = sbc_get_device_type, .get_blocks = tcmu_get_blocks, - .tb_dev_attrib_attrs = passthrough_attrib_attrs, + .tb_dev_attrib_attrs = NULL, }; static int __init tcmu_module_init(void) { - int ret; + int ret, i, len = 0; BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); @@ -1227,12 +1249,31 @@ static int __init tcmu_module_init(void) goto out_unreg_device; } + for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { + len += sizeof(struct configfs_attribute *); + } + len += sizeof(struct configfs_attribute *) * 2; + + tcmu_attrs = kzalloc(len, GFP_KERNEL); + if (!tcmu_attrs) { + ret = -ENOMEM; + goto out_unreg_genl; + } + + for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { + tcmu_attrs[i] = passthrough_attrib_attrs[i]; + } + tcmu_attrs[i] = &tcmu_attr_cmd_time_out; + tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; + ret = transport_backend_register(&tcmu_ops); if (ret) - goto out_unreg_genl; + goto out_attrs; return 0; +out_attrs: + kfree(tcmu_attrs); out_unreg_genl: genl_unregister_family(&tcmu_genl_family); out_unreg_device: @@ -1246,6 +1287,7 @@ out_free_cache: static void __exit tcmu_module_exit(void) { target_backend_unregister(&tcmu_ops); + kfree(tcmu_attrs); genl_unregister_family(&tcmu_genl_family); root_device_unregister(tcmu_root_device); kmem_cache_destroy(tcmu_cmd_cache); -- cgit v1.2.3 From c4a9b538ab2a109c5f9798bea1f8f4bf93aadfb9 Mon Sep 17 00:00:00 2001 From: Joe Carnuccio Date: Wed, 15 Mar 2017 09:48:43 -0700 Subject: qla2xxx: Allow vref count to timeout on vport delete. 
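The change replaces an msleep() polling loop with a waitqueue. A generic sketch of that idiom, using invented names rather than the qla2xxx structures, looks like this:

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/jiffies.h>

struct demo_obj {
	atomic_t refs;
	wait_queue_head_t drain_wq;
};

static void demo_init(struct demo_obj *o)
{
	atomic_set(&o->refs, 0);
	init_waitqueue_head(&o->drain_wq);
}

static void demo_put(struct demo_obj *o)
{
	/* Every path that drops a reference also wakes the waiter. */
	atomic_dec(&o->refs);
	wake_up(&o->drain_wq);
}

static void demo_wait_for_drain(struct demo_obj *o)
{
	/* Bounded wait for the count to drain instead of polling with msleep(). */
	wait_event_timeout(o->drain_wq, !atomic_read(&o->refs), 10 * HZ);
}

The diff below wires wake_up(&vha->vref_waitq) next to each atomic_dec(&vha->vref_count) so the vport delete path can block with a 10 second bound instead of looping on msleep(500).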
Cc: Signed-off-by: Joe Carnuccio Signed-off-by: Himanshu Madhani Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_attr.c | 4 +--- drivers/scsi/qla2xxx/qla_def.h | 6 +++++- drivers/scsi/qla2xxx/qla_init.c | 1 + drivers/scsi/qla2xxx/qla_mid.c | 14 ++++++++------ drivers/scsi/qla2xxx/qla_os.c | 1 + 5 files changed, 16 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index f610103994af..435ff7fd6384 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2154,8 +2154,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) "Timer for the VP[%d] has stopped\n", vha->vp_idx); } - BUG_ON(atomic_read(&vha->vref_count)); - qla2x00_free_fcports(vha); mutex_lock(&ha->vport_lock); @@ -2166,7 +2164,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, vha->gnl.ldma); - if (vha->qpair->vp_idx == vha->vp_idx) { + if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0x7087, "Queue Pair delete failed.\n"); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 625d438e3cce..8662ef4192db 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -4076,6 +4076,7 @@ typedef struct scsi_qla_host { /* Count of active session/fcport */ int fcport_count; wait_queue_head_t fcport_waitQ; + wait_queue_head_t vref_waitq; } scsi_qla_host_t; struct qla27xx_image_status { @@ -4131,14 +4132,17 @@ struct qla2_sgx { mb(); \ if (__vha->flags.delete_progress) { \ atomic_dec(&__vha->vref_count); \ + wake_up(&__vha->vref_waitq); \ __bail = 1; \ } else { \ __bail = 0; \ } \ } while (0) -#define QLA_VHA_MARK_NOT_BUSY(__vha) \ +#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ atomic_dec(&__vha->vref_count); \ + wake_up(&__vha->vref_waitq); \ +} while (0) \ #define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \ atomic_inc(&__qpair->ref_count); \ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 32fb9007f137..9f3740c68cc8 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -5148,6 +5148,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha) } } atomic_dec(&vha->vref_count); + wake_up(&vha->vref_waitq); } spin_unlock_irqrestore(&ha->vport_slock, flags); } diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c6d6f0d912ff..09a490c98763 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -74,13 +74,14 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) * ensures no active vp_list traversal while the vport is removed * from the queue) */ - spin_lock_irqsave(&ha->vport_slock, flags); - while (atomic_read(&vha->vref_count)) { - spin_unlock_irqrestore(&ha->vport_slock, flags); - - msleep(500); + wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count), + 10*HZ); - spin_lock_irqsave(&ha->vport_slock, flags); + spin_lock_irqsave(&ha->vport_slock, flags); + if (atomic_read(&vha->vref_count)) { + ql_dbg(ql_dbg_vport, vha, 0xfffa, + "vha->vref_count=%u timeout\n", vha->vref_count.counter); + vha->vref_count = (atomic_t)ATOMIC_INIT(0); } list_del(&vha->list); qlt_update_vp_map(vha, RESET_VP_IDX); @@ -269,6 +270,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vha->vref_count); + wake_up(&vha->vref_waitq); } i++; } diff --git 
a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1fed235a1b4a..54d4e802bde0 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -4268,6 +4268,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, spin_lock_init(&vha->work_lock); spin_lock_init(&vha->cmd_list_lock); init_waitqueue_head(&vha->fcport_waitQ); + init_waitqueue_head(&vha->vref_waitq); vha->gnl.size = sizeof(struct get_name_list_extended) * (ha->max_loop_id + 1); -- cgit v1.2.3 From ae940f2c472a62904dc18234de5cf3ed28f195ee Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Wed, 15 Mar 2017 09:48:44 -0700 Subject: qla2xxx: Fix memory leak for abts processing Cc: Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_target.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 45f5077684f0..ecf97c5993e8 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -6642,6 +6642,8 @@ qlt_handle_abts_recv_work(struct work_struct *work) spin_lock_irqsave(&ha->hardware_lock, flags); qlt_response_pkt_all_vps(vha, (response_t *)&op->atio); spin_unlock_irqrestore(&ha->hardware_lock, flags); + + kfree(op); } void -- cgit v1.2.3 From 8b666809e10cda9814af3e8be339d35b83909056 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Wed, 15 Mar 2017 09:48:45 -0700 Subject: qla2xxx: Fix request queue corruption. When FW notify driver or driver detects low FW resource, driver tries to send out Busy SCSI Status to tell Initiator side to back off. During the send process, the lock was not held. Cc: Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_target.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index ecf97c5993e8..a463bcc57902 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -5079,16 +5079,22 @@ qlt_send_busy(struct scsi_qla_host *vha, static int qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, - struct atio_from_isp *atio) + struct atio_from_isp *atio, bool ha_locked) { struct qla_hw_data *ha = vha->hw; uint16_t status; + unsigned long flags; if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) return 0; + if (!ha_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); status = temp_sam_status; qlt_send_busy(vha, atio, status); + if (!ha_locked) + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return 1; } @@ -5133,7 +5139,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { - rc = qlt_chk_qfull_thresh_hold(vha, atio); + rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked); if (rc != 0) { tgt->atio_irq_cmd_count--; return; @@ -5256,7 +5262,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) break; } - rc = qlt_chk_qfull_thresh_hold(vha, atio); + rc = qlt_chk_qfull_thresh_hold(vha, atio, true); if (rc != 0) { tgt->irq_cmd_count--; return; -- cgit v1.2.3 From 8f6fc8d4e7ae2347d6261d11a7eb2b247d2954d8 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Wed, 15 Mar 2017 09:48:46 -0700 Subject: qla2xxx: Fix inadequate lock protection for ABTS. Normally, ABTS is sent to Target Core as Task MGMT command. 
In the case of error, qla2xxx needs to send response, hardware_lock is required to prevent request queue corruption. Cc: Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_target.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index a463bcc57902..a78c3e9bcb57 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -130,6 +130,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha, fc_port_t *fcport, bool local); void qlt_unreg_sess(struct fc_port *sess); +static void qlt_24xx_handle_abts(struct scsi_qla_host *, + struct abts_recv_from_24xx *); + /* * Global Variables */ @@ -389,6 +392,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, (struct abts_recv_from_24xx *)atio; struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, entry->vp_index); + unsigned long flags; + if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xffff, "qla_target(%d): Response pkt (ABTS_RECV_24XX) " @@ -396,9 +401,12 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, vha->vp_idx, entry->vp_index); break; } - qlt_response_pkt(host, (response_t *)atio); + if (!ha_locked) + spin_lock_irqsave(&host->hw->hardware_lock, flags); + qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio); + if (!ha_locked) + spin_unlock_irqrestore(&host->hw->hardware_lock, flags); break; - } /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */ -- cgit v1.2.3 From f159b3c7cd45c550d0f73806451a10b6b6bc08ae Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Wed, 15 Mar 2017 09:48:47 -0700 Subject: qla2xxx: Fix sess_lock & hardware_lock lock order problem. The main lock that needs to be held for CMD or TMR submission to upper layer is the sess_lock. The sess_lock is used to serialize cmd submission and session deletion. The addition of hardware_lock being held is not necessary. This patch removes hardware_lock dependency from CMD/TMR submission. Use hardware_lock only for error response in this case. 
Path1 CPU0 CPU1 ---- ---- lock(&(&ha->tgt.sess_lock)->rlock); lock(&(&ha->hardware_lock)->rlock); lock(&(&ha->tgt.sess_lock)->rlock); lock(&(&ha->hardware_lock)->rlock); Path2/deadlock *** DEADLOCK *** Call Trace: dump_stack+0x85/0xc2 print_circular_bug+0x1e3/0x250 __lock_acquire+0x1425/0x1620 lock_acquire+0xbf/0x210 _raw_spin_lock_irqsave+0x53/0x70 qlt_sess_work_fn+0x21d/0x480 [qla2xxx] process_one_work+0x1f4/0x6e0 Cc: Cc: Bart Van Assche Reported-by: Bart Van Assche Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_target.c | 41 +++++++++++++++++---------------------- 1 file changed, 18 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index a78c3e9bcb57..989f931af156 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -5727,30 +5727,23 @@ static void qlt_abort_work(struct qla_tgt *tgt, } } - spin_lock_irqsave(&ha->hardware_lock, flags); - - if (tgt->tgt_stop) - goto out_term; - rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); + if (rc != 0) goto out_term; - spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (sess) - ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); return; out_term2: - spin_lock_irqsave(&ha->hardware_lock, flags); + if (sess) + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); out_term: + spin_lock_irqsave(&ha->hardware_lock, flags); qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); spin_unlock_irqrestore(&ha->hardware_lock, flags); - - if (sess) - ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); } static void qlt_tmr_work(struct qla_tgt *tgt, @@ -5770,7 +5763,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (tgt->tgt_stop) - goto out_term; + goto out_term2; s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); @@ -5782,11 +5775,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt, spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (!sess) - goto out_term; + goto out_term2; } else { if (sess->deleted) { sess = NULL; - goto out_term; + goto out_term2; } if (!kref_get_unless_zero(&sess->sess_kref)) { @@ -5794,7 +5787,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, "%s: kref_get fail %8phC\n", __func__, sess->port_name); sess = NULL; - goto out_term; + goto out_term2; } } @@ -5804,17 +5797,19 @@ static void qlt_tmr_work(struct qla_tgt *tgt, unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); - if (rc != 0) - goto out_term; - ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + if (rc != 0) + goto out_term; return; +out_term2: + if (sess) + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); out_term: qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); - ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } static void qlt_sess_work_fn(struct work_struct *work) -- cgit v1.2.3 From 5b33469a055c77001fd2c62b0f985c991b0e5b65 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Wed, 15 Mar 2017 09:48:48 -0700 Subject: qla2xxx: Allow relogin to proceed if remote login did not finish If the remote port have 
started the login process, then the PLOGI and PRLI should be back to back. Driver will allow the remote port to complete the process. For the case where the remote port decide to back off from sending PRLI, this local port sets an expiration timer for the PRLI. Once the expiration time passes, the relogin retry logic is allowed to go through and perform login with the remote port. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_def.h | 2 ++ drivers/scsi/qla2xxx/qla_init.c | 12 ++++++++++-- drivers/scsi/qla2xxx/qla_isr.c | 25 +++++++++++++++++++------ drivers/scsi/qla2xxx/qla_target.c | 1 + 4 files changed, 32 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 8662ef4192db..56cd45fc600a 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2300,6 +2300,8 @@ typedef struct fc_port { struct ct_sns_desc ct_desc; enum discovery_state disc_state; enum login_state fw_login_state; + unsigned long plogi_nack_done_deadline; + u32 login_gen, last_login_gen; u32 rscn_gen, last_rscn_gen; u32 chip_reset; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 9f3740c68cc8..a7865a5d556d 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -876,10 +876,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) fcport->login_retry--; if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || - (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || (fcport->fw_login_state == DSC_LS_PRLI_PEND)) return 0; + if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { + if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) + return 0; + } + /* for pure Target Mode. 
Login will not be initiated */ if (vha->host->active_mode == MODE_TARGET) return 0; @@ -1041,10 +1045,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, fcport->flags); if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || - (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || (fcport->fw_login_state == DSC_LS_PRLI_PEND)) return; + if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { + if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) + return; + } + if (fcport->flags & FCF_ASYNC_SENT) { fcport->login_retry++; set_bit(RELOGIN_NEEDED, &vha->dpc_flags); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 3c66ea29de27..b2c6da752edd 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1620,9 +1620,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, QLA_LOGIO_LOGIN_RETRIED : 0; if (logio->entry_status) { ql_log(ql_log_warn, fcport->vha, 0x5034, - "Async-%s error entry - hdl=%x" + "Async-%s error entry - %8phC hdl=%x" "portid=%02x%02x%02x entry-status=%x.\n", - type, sp->handle, fcport->d_id.b.domain, + type, fcport->port_name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, logio->entry_status); ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, @@ -1633,8 +1633,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { ql_dbg(ql_dbg_async, fcport->vha, 0x5036, - "Async-%s complete - hdl=%x portid=%02x%02x%02x " - "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain, + "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x " + "iop0=%x.\n", type, fcport->port_name, sp->handle, + fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, le32_to_cpu(logio->io_parameter[0])); @@ -1674,6 +1675,17 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, case LSC_SCODE_NPORT_USED: data[0] = MBS_LOOP_ID_USED; break; + case LSC_SCODE_CMD_FAILED: + if (iop[1] == 0x0606) { + /* + * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI, + * Target side acked. + */ + data[0] = MBS_COMMAND_COMPLETE; + goto logio_done; + } + data[0] = MBS_COMMAND_ERROR; + break; case LSC_SCODE_NOXCB: vha->hw->exch_starvation++; if (vha->hw->exch_starvation > 5) { @@ -1695,8 +1707,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, } ql_dbg(ql_dbg_async, fcport->vha, 0x5037, - "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x " - "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain, + "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x " + "iop0=%x iop1=%x.\n", type, fcport->port_name, + sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, le16_to_cpu(logio->comp_status), le32_to_cpu(logio->io_parameter[0]), diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 989f931af156..925d9b858b24 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -562,6 +562,7 @@ void qla2x00_async_nack_sp_done(void *s, int res) sp->fcport->login_gen++; sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; sp->fcport->logout_on_delete = 1; + sp->fcport->plogi_nack_done_deadline = jiffies + HZ; break; case SRB_NACK_PRLI: -- cgit v1.2.3 From be25152c0d9e236076323abbe9def9714234b761 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Wed, 15 Mar 2017 09:48:49 -0700 Subject: qla2xxx: Improve T10-DIF/PI handling in driver. Add routines to support T10 DIF tag. 
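For context, the 8-byte protection-information tuple that the new DIF routines parse has the layout sketched below; the names are invented for illustration, but the offsets match what the driver reads from the firmware's actual/expected DIF buffers:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_t10_pi_tuple {
	__be16 guard;		/* CRC16 of the data block */
	__be16 app_tag;		/* application tag */
	__be32 ref_tag;		/* normally the low 32 bits of the LBA */
};

static void demo_unpack_pi(const u8 *p, u16 *guard, u16 *app_tag, u32 *ref_tag)
{
	*guard   = be16_to_cpu(*(const __be16 *)(p + 0));
	*app_tag = be16_to_cpu(*(const __be16 *)(p + 2));
	*ref_tag = be32_to_cpu(*(const __be32 *)(p + 4));
}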
Signed-off-by: Quinn Tran Signed-off-by: Anil Gurumurthy Signed-off-by: Himanshu Madhani Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_dbg.h | 1 + drivers/scsi/qla2xxx/qla_def.h | 10 + drivers/scsi/qla2xxx/qla_gbl.h | 6 +- drivers/scsi/qla2xxx/qla_iocb.c | 13 +- drivers/scsi/qla2xxx/qla_target.c | 540 ++++++++++++++++++++++--------------- drivers/scsi/qla2xxx/qla_target.h | 38 ++- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 49 ++-- 7 files changed, 406 insertions(+), 251 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index e1fc4e66966a..c6bffe929fe7 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -348,6 +348,7 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...); #define ql_dbg_tgt 0x00004000 /* Target mode */ #define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */ #define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */ +#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */ extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *, uint32_t, void **); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 56cd45fc600a..9d1d3dcf1c87 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3127,6 +3127,16 @@ struct bidi_statistics { unsigned long long transfer_bytes; }; +struct qla_tc_param { + struct scsi_qla_host *vha; + uint32_t blk_sz; + uint32_t bufflen; + struct scatterlist *sg; + struct scatterlist *prot_sg; + struct crc_context *ctx; + uint8_t *ctx_dsd_alloced; +}; + /* Multi queue support */ #define MBC_INITIALIZE_MULTIQ 0x1f #define QLA_QUE_PAGE 0X1000 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index b3d6441d1d90..ca6f122e5865 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -256,11 +256,11 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *); extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); extern int qla2x00_issue_marker(scsi_qla_host_t *, int); extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, - uint32_t *, uint16_t, struct qla_tgt_cmd *); + uint32_t *, uint16_t, struct qla_tc_param *); extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, - uint32_t *, uint16_t, struct qla_tgt_cmd *); + uint32_t *, uint16_t, struct qla_tc_param *); extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, - uint32_t *, uint16_t, struct qla_tgt_cmd *); + uint32_t *, uint16_t, struct qla_tc_param *); extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *); extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *); extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *, diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 535079280288..ea027f6a7fd4 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -889,7 +889,7 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, - uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) + uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) { void *next_dsd; uint8_t avail_dsds = 0; @@ -898,7 +898,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, struct scatterlist *sg_prot; uint32_t *cur_dsd = dsd; uint16_t used_dsds = tot_dsds; - uint32_t prot_int; /* protection 
interval */ uint32_t partial; struct qla2_sgx sgx; @@ -966,7 +965,7 @@ alloc_and_fill: } else { list_add_tail(&dsd_ptr->list, &(tc->ctx->dsd_list)); - tc->ctx_dsd_alloced = 1; + *tc->ctx_dsd_alloced = 1; } @@ -1005,7 +1004,7 @@ alloc_and_fill: int qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, - uint16_t tot_dsds, struct qla_tgt_cmd *tc) + uint16_t tot_dsds, struct qla_tc_param *tc) { void *next_dsd; uint8_t avail_dsds = 0; @@ -1066,7 +1065,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, } else { list_add_tail(&dsd_ptr->list, &(tc->ctx->dsd_list)); - tc->ctx_dsd_alloced = 1; + *tc->ctx_dsd_alloced = 1; } /* add new list to cmd iocb or last list */ @@ -1092,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, - uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) + uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) { void *next_dsd; uint8_t avail_dsds = 0; @@ -1158,7 +1157,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, } else { list_add_tail(&dsd_ptr->list, &(tc->ctx->dsd_list)); - tc->ctx_dsd_alloced = 1; + *tc->ctx_dsd_alloced = 1; } /* add new list to cmd iocb or last list */ diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 925d9b858b24..532004981dbd 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -143,6 +143,20 @@ static struct workqueue_struct *qla_tgt_wq; static DEFINE_MUTEX(qla_tgt_mutex); static LIST_HEAD(qla_tgt_glist); +static const char *prot_op_str(u32 prot_op) +{ + switch (prot_op) { + case TARGET_PROT_NORMAL: return "NORMAL"; + case TARGET_PROT_DIN_INSERT: return "DIN_INSERT"; + case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT"; + case TARGET_PROT_DIN_STRIP: return "DIN_STRIP"; + case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP"; + case TARGET_PROT_DIN_PASS: return "DIN_PASS"; + case TARGET_PROT_DOUT_PASS: return "DOUT_PASS"; + default: return "UNKNOWN"; + } +} + /* This API intentionally takes dest as a parameter, rather than returning * int value to avoid caller forgetting to issue wmb() after the store */ void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) @@ -2022,6 +2036,70 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) } EXPORT_SYMBOL(qlt_free_mcmd); +/* + * ha->hardware_lock supposed to be held on entry. 
Might drop it, then + * reacquire + */ +void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, + uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq) +{ + struct atio_from_isp *atio = &cmd->atio; + struct ctio7_to_24xx *ctio; + uint16_t temp; + + ql_dbg(ql_dbg_tgt_dif, vha, 0x3066, + "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, " + "sense_key=%02x, asc=%02x, ascq=%02x", + vha, atio, scsi_status, sense_key, asc, ascq); + + ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); + if (!ctio) { + ql_dbg(ql_dbg_async, vha, 0x3067, + "qla2x00t(%ld): %s failed: unable to allocate request packet", + vha->host_no, __func__); + goto out; + } + + ctio->entry_type = CTIO_TYPE7; + ctio->entry_count = 1; + ctio->handle = QLA_TGT_SKIP_HANDLE; + ctio->nport_handle = cmd->sess->loop_id; + ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); + ctio->vp_index = vha->vp_idx; + ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; + ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; + ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + ctio->exchange_addr = atio->u.isp24.exchange_addr; + ctio->u.status1.flags = (atio->u.isp24.attr << 9) | + cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + ctio->u.status1.ox_id = cpu_to_le16(temp); + ctio->u.status1.scsi_status = + cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status); + ctio->u.status1.response_len = cpu_to_le16(18); + ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); + + if (ctio->u.status1.residual != 0) + ctio->u.status1.scsi_status |= + cpu_to_le16(SS_RESIDUAL_UNDER); + + /* Response code and sense key */ + put_unaligned_le32(((0x70 << 24) | (sense_key << 8)), + (&ctio->u.status1.sense_data)[0]); + /* Additional sense length */ + put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]); + /* ASC and ASCQ */ + put_unaligned_le32(((asc << 24) | (ascq << 16)), + (&ctio->u.status1.sense_data)[3]); + + /* Memory Barrier */ + wmb(); + + qla2x00_start_iocbs(vha, vha->req); +out: + return; +} + /* callback from target fabric module code */ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) { @@ -2270,7 +2348,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, */ return -EAGAIN; } else - ha->tgt.cmds[h-1] = prm->cmd; + ha->tgt.cmds[h - 1] = prm->cmd; pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; pkt->nport_handle = prm->cmd->loop_id; @@ -2400,6 +2478,50 @@ static inline int qlt_has_data(struct qla_tgt_cmd *cmd) return cmd->bufflen > 0; } +static void qlt_print_dif_err(struct qla_tgt_prm *prm) +{ + struct qla_tgt_cmd *cmd; + struct scsi_qla_host *vha; + + /* asc 0x10=dif error */ + if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) { + cmd = prm->cmd; + vha = cmd->vha; + /* ASCQ */ + switch (prm->sense_buffer[13]) { + case 1: + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + case 2: + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + case 3: + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + default: + 
ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "BE detected Dif ERR: lba[%llx|%lld] len[%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + } + ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16); + } +} + /* * Called without ha->hardware_lock held */ @@ -2521,18 +2643,9 @@ skip_explict_conf: for (i = 0; i < prm->sense_buffer_len/4; i++) ((uint32_t *)ctio->u.status1.sense_data)[i] = cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); -#if 0 - if (unlikely((prm->sense_buffer_len % 4) != 0)) { - static int q; - if (q < 10) { - ql_dbg(ql_dbg_tgt, vha, 0xe04f, - "qla_target(%d): %d bytes of sense " - "lost", prm->tgt->ha->vp_idx, - prm->sense_buffer_len % 4); - q++; - } - } -#endif + + qlt_print_dif_err(prm); + } else { ctio->u.status1.flags &= ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); @@ -2546,19 +2659,9 @@ skip_explict_conf: /* Sense with len > 24, is it possible ??? */ } - - -/* diff */ static inline int qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) { - /* - * Uncomment when corresponding SCSI changes are done. - * - if (!sp->cmd->prot_chk) - return 0; - * - */ switch (se_cmd->prot_op) { case TARGET_PROT_DOUT_INSERT: case TARGET_PROT_DIN_STRIP: @@ -2579,16 +2682,38 @@ qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) return 0; } +static inline int +qla_tgt_ref_mask_check(struct se_cmd *se_cmd) +{ + switch (se_cmd->prot_op) { + case TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_INSERT: + case TARGET_PROT_DIN_STRIP: + case TARGET_PROT_DOUT_STRIP: + case TARGET_PROT_DIN_PASS: + case TARGET_PROT_DOUT_PASS: + return 1; + default: + return 0; + } + return 0; +} + /* - * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command - * + * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command */ -static inline void -qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) +static void +qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx, + uint16_t *pfw_prot_opts) { + struct se_cmd *se_cmd = &cmd->se_cmd; uint32_t lba = 0xffffffff & se_cmd->t_task_lba; + scsi_qla_host_t *vha = cmd->tgt->vha; + struct qla_hw_data *ha = vha->hw; + uint32_t t32 = 0; - /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2 + /* + * wait till Mode Sense/Select cmd, modepage Ah, subpage 2 * have been immplemented by TCM, before AppTag is avail. * Look for modesense_handlers[] */ @@ -2596,65 +2721,73 @@ qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) ctx->app_tag_mask[0] = 0x0; ctx->app_tag_mask[1] = 0x0; + if (IS_PI_UNINIT_CAPABLE(ha)) { + if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || + (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) + *pfw_prot_opts |= PO_DIS_VALD_APP_ESC; + else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) + *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; + } + + t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts); + switch (se_cmd->prot_type) { case TARGET_DIF_TYPE0_PROT: /* - * No check for ql2xenablehba_err_chk, as it would be an - * I/O error if hba tag generation is not done. + * No check for ql2xenablehba_err_chk, as it + * would be an I/O error if hba tag generation + * is not done. */ ctx->ref_tag = cpu_to_le32(lba); - - if (!qlt_hba_err_chk_enabled(se_cmd)) - break; - /* enable ALL bytes of the ref tag */ ctx->ref_tag_mask[0] = 0xff; ctx->ref_tag_mask[1] = 0xff; ctx->ref_tag_mask[2] = 0xff; ctx->ref_tag_mask[3] = 0xff; break; - /* - * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and - * 16 bit app tag. 
- */ case TARGET_DIF_TYPE1_PROT: - ctx->ref_tag = cpu_to_le32(lba); - - if (!qlt_hba_err_chk_enabled(se_cmd)) - break; - - /* enable ALL bytes of the ref tag */ - ctx->ref_tag_mask[0] = 0xff; - ctx->ref_tag_mask[1] = 0xff; - ctx->ref_tag_mask[2] = 0xff; - ctx->ref_tag_mask[3] = 0xff; - break; - /* - * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to - * match LBA in CDB + N - */ + /* + * For TYPE 1 protection: 16 bit GUARD tag, 32 bit + * REF tag, and 16 bit app tag. + */ + ctx->ref_tag = cpu_to_le32(lba); + if (!qla_tgt_ref_mask_check(se_cmd) || + !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; + break; + } + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; case TARGET_DIF_TYPE2_PROT: - ctx->ref_tag = cpu_to_le32(lba); - - if (!qlt_hba_err_chk_enabled(se_cmd)) - break; - - /* enable ALL bytes of the ref tag */ - ctx->ref_tag_mask[0] = 0xff; - ctx->ref_tag_mask[1] = 0xff; - ctx->ref_tag_mask[2] = 0xff; - ctx->ref_tag_mask[3] = 0xff; - break; - - /* For Type 3 protection: 16 bit GUARD only */ + /* + * For TYPE 2 protection: 16 bit GUARD + 32 bit REF + * tag has to match LBA in CDB + N + */ + ctx->ref_tag = cpu_to_le32(lba); + if (!qla_tgt_ref_mask_check(se_cmd) || + !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; + break; + } + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; case TARGET_DIF_TYPE3_PROT: - ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = - ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; - break; + /* For TYPE 3 protection: 16 bit GUARD only */ + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; + ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = + ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; + break; } } - static inline int qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) { @@ -2673,6 +2806,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) struct se_cmd *se_cmd = &cmd->se_cmd; uint32_t h; struct atio_from_isp *atio = &prm->cmd->atio; + struct qla_tc_param tc; uint16_t t16; ha = vha->hw; @@ -2698,16 +2832,15 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) case TARGET_PROT_DIN_INSERT: case TARGET_PROT_DOUT_STRIP: transfer_length = data_bytes; - data_bytes += dif_bytes; + if (cmd->prot_sg_cnt) + data_bytes += dif_bytes; break; - case TARGET_PROT_DIN_STRIP: case TARGET_PROT_DOUT_INSERT: case TARGET_PROT_DIN_PASS: case TARGET_PROT_DOUT_PASS: transfer_length = data_bytes + dif_bytes; break; - default: BUG(); break; @@ -2743,7 +2876,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) break; } - /* ---- PKT ---- */ /* Update entry type to indicate Command Type CRC_2 IOCB */ pkt->entry_type = CTIO_CRC2; @@ -2761,9 +2893,8 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) } else ha->tgt.cmds[h-1] = prm->cmd; - pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; - pkt->nport_handle = prm->cmd->loop_id; + pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; @@ -2784,12 +2915,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) else if (cmd->dma_data_direction == DMA_FROM_DEVICE) pkt->flags 
= cpu_to_le16(CTIO7_FLAGS_DATA_OUT); - pkt->dseg_count = prm->tot_dsds; /* Fibre channel byte count */ pkt->transfer_length = cpu_to_le32(transfer_length); - /* ----- CRC context -------- */ /* Allocate CRC context from global pool */ @@ -2809,13 +2938,12 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) /* Set handle */ crc_ctx_pkt->handle = pkt->handle; - qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt); + qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); pkt->crc_context_len = CRC_CONTEXT_LEN_FW; - if (!bundling) { cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; } else { @@ -2836,16 +2964,24 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); crc_ctx_pkt->guard_seed = cpu_to_le16(0); + memset((uint8_t *)&tc, 0 , sizeof(tc)); + tc.vha = vha; + tc.blk_sz = cmd->blk_sz; + tc.bufflen = cmd->bufflen; + tc.sg = cmd->sg; + tc.prot_sg = cmd->prot_sg; + tc.ctx = crc_ctx_pkt; + tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced; /* Walks data segments */ pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); if (!bundling && prm->prot_seg_cnt) { if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, - prm->tot_dsds, cmd)) + prm->tot_dsds, &tc)) goto crc_queuing_error; } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, - (prm->tot_dsds - prm->prot_seg_cnt), cmd)) + (prm->tot_dsds - prm->prot_seg_cnt), &tc)) goto crc_queuing_error; if (bundling && prm->prot_seg_cnt) { @@ -2854,18 +2990,18 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, - prm->prot_seg_cnt, cmd)) + prm->prot_seg_cnt, &tc)) goto crc_queuing_error; } return QLA_SUCCESS; crc_queuing_error: /* Cleanup will be performed by the caller */ + vha->hw->tgt.cmds[h - 1] = NULL; return QLA_FUNCTION_FAILED; } - /* * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * * QLA_TGT_XMIT_STATUS for >= 24xx silicon @@ -3113,139 +3249,113 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer); /* - * Checks the guard or meta-data for the type of error - * detected by the HBA. + * it is assumed either hardware_lock or qpair lock is held. 
*/ -static inline int +static void qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, - struct ctio_crc_from_fw *sts) + struct ctio_crc_from_fw *sts) { uint8_t *ap = &sts->actual_dif[0]; uint8_t *ep = &sts->expected_dif[0]; - uint32_t e_ref_tag, a_ref_tag; - uint16_t e_app_tag, a_app_tag; - uint16_t e_guard, a_guard; uint64_t lba = cmd->se_cmd.t_task_lba; + uint8_t scsi_status, sense_key, asc, ascq; + unsigned long flags; - a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); - a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); - a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); - - e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); - e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); - e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); - - ql_dbg(ql_dbg_tgt, vha, 0xe075, - "iocb(s) %p Returned STATUS.\n", sts); - - ql_dbg(ql_dbg_tgt, vha, 0xf075, - "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); - - /* - * Ignore sector if: - * For type 3: ref & app tag is all 'f's - * For type 0,1,2: app tag is all 'f's - */ - if ((a_app_tag == 0xffff) && - ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) || - (a_ref_tag == 0xffffffff))) { - uint32_t blocks_done; - - /* 2TB boundary case covered automatically with this */ - blocks_done = e_ref_tag - (uint32_t)lba + 1; - cmd->se_cmd.bad_sector = e_ref_tag; - cmd->se_cmd.pi_err = 0; - ql_dbg(ql_dbg_tgt, vha, 0xf074, - "need to return scsi good\n"); - - /* Update protection tag */ - if (cmd->prot_sg_cnt) { - uint32_t i, k = 0, num_ent; - struct scatterlist *sg, *sgl; - + cmd->trc_flags |= TRC_DIF_ERR; - sgl = cmd->prot_sg; + cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); + cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); + cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); - /* Patch the corresponding protection tags */ - for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) { - num_ent = sg_dma_len(sg) / 8; - if (k + num_ent < blocks_done) { - k += num_ent; - continue; - } - k = blocks_done; - break; - } + cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); + cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); + cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); - if (k != blocks_done) { - ql_log(ql_log_warn, vha, 0xf076, - "unexpected tag values tag:lba=%u:%llu)\n", - e_ref_tag, (unsigned long long)lba); - goto out; - } + ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, + "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); -#if 0 - struct sd_dif_tuple *spt; - /* TODO: - * This section came from initiator. Is it valid here? - * should ulp be override with actual val??? 
- */ - spt = page_address(sg_page(sg)) + sg->offset; - spt += j; + scsi_status = sense_key = asc = ascq = 0; - spt->app_tag = 0xffff; - if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3) - spt->ref_tag = 0xffffffff; -#endif - } - - return 0; - } - - /* check guard */ - if (e_guard != a_guard) { - cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; - cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; - - ql_log(ql_log_warn, vha, 0xe076, - "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, - a_guard, e_guard, cmd); - goto out; + /* check appl tag */ + if (cmd->e_app_tag != cmd->a_app_tag) { + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " + "Ref[%x|%x], App[%x|%x], " + "Guard [%x|%x] cmd=%p ox_id[%04x]", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, + cmd->a_app_tag, cmd->e_app_tag, + cmd->a_guard, cmd->e_guard, + cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); + + cmd->dif_err_code = DIF_ERR_APP; + scsi_status = SAM_STAT_CHECK_CONDITION; + sense_key = ABORTED_COMMAND; + asc = 0x10; + ascq = 0x2; } /* check ref tag */ - if (e_ref_tag != a_ref_tag) { - cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; - cmd->se_cmd.bad_sector = e_ref_tag; - - ql_log(ql_log_warn, vha, 0xe077, - "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, - a_guard, e_guard, cmd); + if (cmd->e_ref_tag != cmd->a_ref_tag) { + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " + "Ref[%x|%x], App[%x|%x], " + "Guard[%x|%x] cmd=%p ox_id[%04x] ", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, + cmd->a_app_tag, cmd->e_app_tag, + cmd->a_guard, cmd->e_guard, + cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); + + cmd->dif_err_code = DIF_ERR_REF; + scsi_status = SAM_STAT_CHECK_CONDITION; + sense_key = ABORTED_COMMAND; + asc = 0x10; + ascq = 0x3; goto out; } - /* check appl tag */ - if (e_app_tag != a_app_tag) { - cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; - cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; - - ql_log(ql_log_warn, vha, 0xe078, - "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, - a_guard, e_guard, cmd); - goto out; + /* check guard */ + if (cmd->e_guard != cmd->a_guard) { + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " + "Ref[%x|%x], App[%x|%x], " + "Guard [%x|%x] cmd=%p ox_id[%04x]", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, + cmd->a_app_tag, cmd->e_app_tag, + cmd->a_guard, cmd->e_guard, + cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); + cmd->dif_err_code = DIF_ERR_GRD; + scsi_status = SAM_STAT_CHECK_CONDITION; + sense_key = ABORTED_COMMAND; + asc = 0x10; + ascq = 0x1; } out: - return 1; -} + switch (cmd->state) { + case QLA_TGT_STATE_NEED_DATA: + /* handle_data will load DIF error code */ + cmd->state = QLA_TGT_STATE_DATA_IN; + vha->hw->tgt.tgt_ops->handle_data(cmd); + break; + default: + spin_lock_irqsave(&cmd->cmd_lock, flags); + if 
(cmd->aborted) { + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + vha->hw->tgt.tgt_ops->free_cmd(cmd); + break; + } + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq); + /* assume scsi status gets out on the wire. + * Will not wait for completion. + */ + vha->hw->tgt.tgt_ops->free_cmd(cmd); + break; + } +} /* If hardware_lock held on entry, might drop it, then reaquire */ /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ @@ -3552,6 +3662,16 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, { int term = 0; + if (cmd->se_cmd.prot_op) + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x] op %#x/%s", + cmd->lba, cmd->lba, + cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr, + cmd->se_cmd.prot_op, + prot_op_str(cmd->se_cmd.prot_op)); + if (ctio != NULL) { struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; term = !(c->flags & @@ -3769,32 +3889,15 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, struct ctio_crc_from_fw *crc = (struct ctio_crc_from_fw *)ctio; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, - "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", + "qla_target(%d): CTIO with DIF_ERROR status %x " + "received (state %x, ulp_cmd %p) actual_dif[0x%llx] " + "expect_dif[0x%llx]\n", vha->vp_idx, status, cmd->state, se_cmd, *((u64 *)&crc->actual_dif[0]), *((u64 *)&crc->expected_dif[0])); - if (qlt_handle_dif_error(vha, cmd, ctio)) { - if (cmd->state == QLA_TGT_STATE_NEED_DATA) { - /* scsi Write/xfer rdy complete */ - goto skip_term; - } else { - /* scsi read/xmit respond complete - * call handle dif to send scsi status - * rather than terminate exchange. - */ - cmd->state = QLA_TGT_STATE_PROCESSED; - ha->tgt.tgt_ops->handle_dif_err(cmd); - return; - } - } else { - /* Need to generate a SCSI good completion. - * because FW did not send scsi status. 
- */ - status = 0; - goto skip_term; - } - break; + qlt_handle_dif_error(vha, cmd, ctio); + return; } default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, @@ -3817,7 +3920,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, return; } } -skip_term: if (cmd->state == QLA_TGT_STATE_PROCESSED) { cmd->trc_flags |= TRC_CTIO_DONE; diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index a7f90dcaae37..c35f889b94a6 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -378,6 +378,14 @@ static inline void adjust_corrupted_atio(struct atio_from_isp *atio) atio->u.isp24.fcp_cmnd.add_cdb_len = 0; } +static inline int get_datalen_for_atio(struct atio_from_isp *atio) +{ + int len = atio->u.isp24.fcp_cmnd.add_cdb_len; + + return (be32_to_cpu(get_unaligned((uint32_t *) + &atio->u.isp24.fcp_cmnd.add_cdb[len * 4]))); +} + #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ /* @@ -667,7 +675,6 @@ struct qla_tgt_func_tmpl { int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, unsigned char *, uint32_t, int, int, int); void (*handle_data)(struct qla_tgt_cmd *); - void (*handle_dif_err)(struct qla_tgt_cmd *); int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t, uint32_t); void (*free_cmd)(struct qla_tgt_cmd *); @@ -684,6 +691,8 @@ struct qla_tgt_func_tmpl { void (*clear_nacl_from_fcport_map)(struct fc_port *); void (*put_sess)(struct fc_port *); void (*shutdown_sess)(struct fc_port *); + int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts); + int (*chk_dif_tags)(uint32_t tag); }; int qla2x00_wait_for_hba_online(struct scsi_qla_host *); @@ -720,8 +729,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *); #define QLA_TGT_ABORT_ALL 0xFFFE #define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD #define QLA_TGT_NEXUS_LOSS 0xFFFC -#define QLA_TGT_ABTS 0xFFFB -#define QLA_TGT_2G_ABORT_TASK 0xFFFA +#define QLA_TGT_ABTS 0xFFFB +#define QLA_TGT_2G_ABORT_TASK 0xFFFA /* Notify Acknowledge flags */ #define NOTIFY_ACK_RES_COUNT BIT_8 @@ -845,6 +854,7 @@ enum trace_flags { TRC_CMD_FREE = BIT_17, TRC_DATA_IN = BIT_18, TRC_ABORT = BIT_19, + TRC_DIF_ERR = BIT_20, }; struct qla_tgt_cmd { @@ -862,7 +872,6 @@ struct qla_tgt_cmd { unsigned int sg_mapped:1; unsigned int free_sg:1; unsigned int write_data_transferred:1; - unsigned int ctx_dsd_alloced:1; unsigned int q_full:1; unsigned int term_exchg:1; unsigned int cmd_sent_to_fw:1; @@ -885,11 +894,25 @@ struct qla_tgt_cmd { struct list_head cmd_list; struct atio_from_isp atio; - /* t10dif */ + + uint8_t ctx_dsd_alloced; + + /* T10-DIF */ +#define DIF_ERR_NONE 0 +#define DIF_ERR_GRD 1 +#define DIF_ERR_REF 2 +#define DIF_ERR_APP 3 + int8_t dif_err_code; struct scatterlist *prot_sg; uint32_t prot_sg_cnt; - uint32_t blk_sz; + uint32_t blk_sz, num_blks; + uint8_t scsi_status, sense_key, asc, ascq; + struct crc_context *ctx; + uint8_t *cdb; + uint64_t lba; + uint16_t a_guard, e_guard, a_app_tag, e_app_tag; + uint32_t a_ref_tag, e_ref_tag; uint64_t jiffies_at_alloc; uint64_t jiffies_at_free; @@ -1053,4 +1076,7 @@ extern int qlt_free_qfull_cmds(struct scsi_qla_host *); extern void qlt_logo_completion_handler(fc_port_t *, int); extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); +void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t, + uint8_t, uint8_t, uint8_t); + #endif /* __QLA_TARGET_H */ diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 8e8ab0fa9672..7443e4efa3ae 100644 --- 
a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -531,6 +531,24 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) return; } + switch (cmd->dif_err_code) { + case DIF_ERR_GRD: + cmd->se_cmd.pi_err = + TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; + break; + case DIF_ERR_REF: + cmd->se_cmd.pi_err = + TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + break; + case DIF_ERR_APP: + cmd->se_cmd.pi_err = + TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; + break; + case DIF_ERR_NONE: + default: + break; + } + if (cmd->se_cmd.pi_err) transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err); @@ -555,25 +573,23 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); } -static void tcm_qla2xxx_handle_dif_work(struct work_struct *work) +static int tcm_qla2xxx_chk_dif_tags(uint32_t tag) { - struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); - - /* take an extra kref to prevent cmd free too early. - * need to wait for SCSI status/check condition to - * finish responding generate by transport_generic_request_failure. - */ - kref_get(&cmd->se_cmd.cmd_kref); - transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err); + return 0; } -/* - * Called from qla_target.c:qlt_do_ctio_completion() - */ -static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd) +static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd, + uint16_t *pfw_prot_opts) { - INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work); - queue_work(tcm_qla2xxx_free_wq, &cmd->work); + struct se_cmd *se_cmd = &cmd->se_cmd; + + if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) + *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK; + + if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)) + *pfw_prot_opts |= PO_DIS_APP_TAG_VALD; + + return 0; } /* @@ -1610,7 +1626,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id, static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { .handle_cmd = tcm_qla2xxx_handle_cmd, .handle_data = tcm_qla2xxx_handle_data, - .handle_dif_err = tcm_qla2xxx_handle_dif_err, .handle_tmr = tcm_qla2xxx_handle_tmr, .free_cmd = tcm_qla2xxx_free_cmd, .free_mcmd = tcm_qla2xxx_free_mcmd, @@ -1622,6 +1637,8 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, .put_sess = tcm_qla2xxx_put_sess, .shutdown_sess = tcm_qla2xxx_shutdown_sess, + .get_dif_tags = tcm_qla2xxx_dif_tags, + .chk_dif_tags = tcm_qla2xxx_chk_dif_tags, }; static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) -- cgit v1.2.3 From 54b9993c8cf2d77c0f23be828a22e0817f742442 Mon Sep 17 00:00:00 2001 From: Anil Gurumurthy Date: Wed, 15 Mar 2017 09:48:50 -0700 Subject: qla2xxx: Export DIF stats via debugfs Signed-off-by: Anil Gurumurthy Signed-off-by: Himanshu Madhani Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_def.h | 12 ++++++++++++ drivers/scsi/qla2xxx/qla_dfs.c | 15 +++++++++++++++ 2 files changed, 27 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 9d1d3dcf1c87..8228dfac6a31 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3108,6 +3108,16 @@ struct qla_chip_state_84xx { uint32_t gold_fw_version; }; +struct qla_dif_statistics { + uint64_t dif_input_bytes; + uint64_t dif_output_bytes; + uint64_t dif_input_requests; + uint64_t dif_output_requests; + uint32_t dif_guard_err; + uint32_t dif_ref_tag_err; + 
uint32_t dif_app_tag_err; +}; + struct qla_statistics { uint32_t total_isp_aborts; uint64_t input_bytes; @@ -3120,6 +3130,8 @@ struct qla_statistics { uint32_t stat_max_pend_cmds; uint32_t stat_max_qfull_cmds_alloc; uint32_t stat_max_qfull_cmds_dropped; + + struct qla_dif_statistics qla_dif_stats; }; struct bidi_statistics { diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index b48cce696bac..3b35905619b0 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -114,6 +114,21 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused) seq_printf(s, "num Q full sent = %lld\n", vha->tgt_counters.num_q_full_sent); + /* DIF stats */ + seq_printf(s, "DIF Inp Bytes = %lld\n", + vha->qla_stats.qla_dif_stats.dif_input_bytes); + seq_printf(s, "DIF Outp Bytes = %lld\n", + vha->qla_stats.qla_dif_stats.dif_output_bytes); + seq_printf(s, "DIF Inp Req = %lld\n", + vha->qla_stats.qla_dif_stats.dif_input_requests); + seq_printf(s, "DIF Outp Req = %lld\n", + vha->qla_stats.qla_dif_stats.dif_output_requests); + seq_printf(s, "DIF Guard err = %d\n", + vha->qla_stats.qla_dif_stats.dif_guard_err); + seq_printf(s, "DIF Ref tag err = %d\n", + vha->qla_stats.qla_dif_stats.dif_ref_tag_err); + seq_printf(s, "DIF App tag err = %d\n", + vha->qla_stats.qla_dif_stats.dif_app_tag_err); return 0; } -- cgit v1.2.3 From f1443eebca7792b3b8b41b27652d67ddc5d31fa2 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Wed, 15 Mar 2017 09:48:51 -0700 Subject: qla2xxx: Add async new target notification Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_target.c | 6 +++--- drivers/scsi/qla2xxx/qla_target.h | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 532004981dbd..563116188c43 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -6005,13 +6005,13 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; - if (base_vha->fc_vport) - return 0; - mutex_lock(&qla_tgt_mutex); list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); mutex_unlock(&qla_tgt_mutex); + if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target) + ha->tgt.tgt_ops->add_target(base_vha); + return 0; } diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index c35f889b94a6..d64420251194 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -693,6 +693,7 @@ struct qla_tgt_func_tmpl { void (*shutdown_sess)(struct fc_port *); int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts); int (*chk_dif_tags)(uint32_t tag); + void (*add_target)(struct scsi_qla_host *); }; int qla2x00_wait_for_hba_online(struct scsi_qla_host *); -- cgit v1.2.3 From 15f30a5752287f20c7de428423c34bc51cfbe465 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Wed, 15 Mar 2017 09:48:52 -0700 Subject: qla2xxx: Use IOCB interface to submit non-critical MBX. The Mailbox interface is currently over subscribed. We like to reserve the Mailbox interface for the chip managment and link initialization. Any non essential Mailbox command will be routed through the IOCB interface. The IOCB interface is able to absorb more commands. 
Following commands are being routed through IOCB interface - Get ID List (007Ch) - Get Port DB (0064h) - Get Link Priv Stats (006Dh) Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_def.h | 12 +- drivers/scsi/qla2xxx/qla_gbl.h | 10 +- drivers/scsi/qla2xxx/qla_init.c | 46 +------ drivers/scsi/qla2xxx/qla_isr.c | 2 +- drivers/scsi/qla2xxx/qla_mbx.c | 270 ++++++++++++++++++++++++++++++++++++-- drivers/scsi/qla2xxx/qla_target.c | 4 +- 6 files changed, 279 insertions(+), 65 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 8228dfac6a31..ae38b7a789b1 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -395,11 +395,15 @@ struct srb_iocb { struct completion comp; } abt; struct ct_arg ctarg; +#define MAX_IOCB_MB_REG 28 +#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t)) struct { - __le16 in_mb[28]; /* fr fw */ - __le16 out_mb[28]; /* to fw */ + __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */ + __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */ void *out, *in; dma_addr_t out_dma, in_dma; + struct completion comp; + int rc; } mbx; struct { struct imm_ntfy_from_isp *ntfy; @@ -437,7 +441,7 @@ typedef struct srb { uint32_t handle; uint16_t flags; uint16_t type; - char *name; + const char *name; int iocbs; struct qla_qpair *qpair; u32 gen1; /* scratch */ @@ -3364,6 +3368,8 @@ struct qla_hw_data { uint32_t exlogins_enabled:1; uint32_t exchoffld_enabled:1; /* 35 bits */ + + uint32_t fw_started:1; } flags; /* This spinlock is used to protect "io transactions", you must diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index ca6f122e5865..323b912b47f7 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -193,6 +193,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *); void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, uint16_t *); int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); +int qla24xx_async_abort_cmd(srb_t *); /* * Global Functions in qla_mid.c source file. @@ -368,7 +369,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *, extern int qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, - dma_addr_t, uint); + dma_addr_t, uint16_t); extern int qla24xx_abort_command(srb_t *); extern int qla24xx_async_abort_command(srb_t *); @@ -472,6 +473,13 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t); extern int qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint); +int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *); +int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8); +int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t, + uint16_t *); +int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *, + struct port_database_24xx *); + /* * Global Function Prototypes in qla_isr.c source file. 
*/ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index a7865a5d556d..b1bfa63f7d4e 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -629,7 +629,6 @@ void qla24xx_async_gpdb_sp_done(void *s, int res) struct srb *sp = s; struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; - uint64_t zero = 0; struct port_database_24xx *pd; fc_port_t *fcport = sp->fcport; u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb; @@ -649,48 +648,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res) pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; - /* Check for logged in state. */ - if (pd->current_login_state != PDS_PRLI_COMPLETE && - pd->last_login_state != PDS_PRLI_COMPLETE) { - ql_dbg(ql_dbg_mbx, vha, 0xffff, - "Unable to verify login-state (%x/%x) for " - "loop_id %x.\n", pd->current_login_state, - pd->last_login_state, fcport->loop_id); - rval = QLA_FUNCTION_FAILED; - goto gpd_error_out; - } - - if (fcport->loop_id == FC_NO_LOOP_ID || - (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && - memcmp(fcport->port_name, pd->port_name, 8))) { - /* We lost the device mid way. */ - rval = QLA_NOT_LOGGED_IN; - goto gpd_error_out; - } - - /* Names are little-endian. */ - memcpy(fcport->node_name, pd->node_name, WWN_SIZE); - - /* Get port_id of device. */ - fcport->d_id.b.domain = pd->port_id[0]; - fcport->d_id.b.area = pd->port_id[1]; - fcport->d_id.b.al_pa = pd->port_id[2]; - fcport->d_id.b.rsvd_1 = 0; - - /* If not target must be initiator or unknown type. */ - if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) - fcport->port_type = FCT_INITIATOR; - else - fcport->port_type = FCT_TARGET; - - /* Passback COS information. */ - fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? - FC_COS_CLASS2 : FC_COS_CLASS3; - - if (pd->prli_svc_param_word_3[0] & BIT_7) { - fcport->flags |= FCF_CONF_COMP_SUPPORTED; - fcport->conf_compl_supported = 1; - } + rval = __qla24xx_parse_gpdb(vha, fcport, pd); gpd_error_out: memset(&ea, 0, sizeof(ea)); @@ -1266,7 +1224,7 @@ qla24xx_abort_sp_done(void *ptr, int res) complete(&abt->u.abt.comp); } -static int +int qla24xx_async_abort_cmd(srb_t *cmd_sp) { scsi_qla_host_t *vha = cmd_sp->vha; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index b2c6da752edd..3953c8d6af69 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2692,7 +2692,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, return; abt = &sp->u.iocb_cmd; - abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle); + abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle); sp->done(sp, 0); } diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 35079f417417..e40ed570d3c1 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -10,6 +10,28 @@ #include #include +static struct mb_cmd_name { + uint16_t cmd; + const char *str; +} mb_str[] = { + {MBC_GET_PORT_DATABASE, "GPDB"}, + {MBC_GET_ID_LIST, "GIDList"}, + {MBC_GET_LINK_PRIV_STATS, "Stats"}, +}; + +static const char *mb_to_str(uint16_t cmd) +{ + int i; + struct mb_cmd_name *e; + + for (i = 0; i < ARRAY_SIZE(mb_str); i++) { + e = mb_str + i; + if (cmd == e->cmd) + return e->str; + } + return "unknown"; +} + static struct rom_cmd { uint16_t cmd; } rom_cmds[] = { @@ -2818,7 +2840,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, int qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, - dma_addr_t stats_dma, uint options) + 
dma_addr_t stats_dma, uint16_t options) { int rval; mbx_cmd_t mc; @@ -2828,19 +2850,17 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, "Entered %s.\n", __func__); - mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; - mcp->mb[2] = MSW(stats_dma); - mcp->mb[3] = LSW(stats_dma); - mcp->mb[6] = MSW(MSD(stats_dma)); - mcp->mb[7] = LSW(MSD(stats_dma)); - mcp->mb[8] = sizeof(struct link_statistics) / 4; - mcp->mb[9] = vha->vp_idx; - mcp->mb[10] = options; - mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; - mcp->in_mb = MBX_2|MBX_1|MBX_0; - mcp->tov = MBX_TOV_SECONDS; - mcp->flags = IOCTL_CMD; - rval = qla2x00_mailbox_command(vha, mcp); + memset(&mc, 0, sizeof(mc)); + mc.mb[0] = MBC_GET_LINK_PRIV_STATS; + mc.mb[2] = MSW(stats_dma); + mc.mb[3] = LSW(stats_dma); + mc.mb[6] = MSW(MSD(stats_dma)); + mc.mb[7] = LSW(MSD(stats_dma)); + mc.mb[8] = sizeof(struct link_statistics) / 4; + mc.mb[9] = cpu_to_le16(vha->vp_idx); + mc.mb[10] = cpu_to_le16(options); + + rval = qla24xx_send_mb_cmd(vha, &mc); if (rval == QLA_SUCCESS) { if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { @@ -5827,3 +5847,225 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha, return rval; } + +static void qla2x00_async_mb_sp_done(void *s, int res) +{ + struct srb *sp = s; + + sp->u.iocb_cmd.u.mbx.rc = res; + + complete(&sp->u.iocb_cmd.u.mbx.comp); + /* don't free sp here. Let the caller do the free */ +} + +/* + * This mailbox uses the iocb interface to send MB command. + * This allows non-critial (non chip setup) command to go + * out in parrallel. + */ +int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) +{ + int rval = QLA_FUNCTION_FAILED; + srb_t *sp; + struct srb_iocb *c; + + if (!vha->hw->flags.fw_started) + goto done; + + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_MB_IOCB; + sp->name = mb_to_str(mcp->mb[0]); + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG); + + c = &sp->u.iocb_cmd; + c->timeout = qla2x00_async_iocb_timeout; + init_completion(&c->u.mbx.comp); + + sp->done = qla2x00_async_mb_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "%s: %s Failed submission. %x.\n", + __func__, sp->name, rval); + goto done_free_sp; + } + + ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n", + sp->name, sp->handle); + + wait_for_completion(&c->u.mbx.comp); + memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG); + + rval = c->u.mbx.rc; + switch (rval) { + case QLA_FUNCTION_TIMEOUT: + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n", + __func__, sp->name, rval); + break; + case QLA_SUCCESS: + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n", + __func__, sp->name); + sp->free(sp); + break; + default: + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. 
%x.\n", + __func__, sp->name, rval); + sp->free(sp); + break; + } + + return rval; + +done_free_sp: + sp->free(sp); +done: + return rval; +} + +/* + * qla24xx_gpdb_wait + * NOTE: Do not call this routine from DPC thread + */ +int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) +{ + int rval = QLA_FUNCTION_FAILED; + dma_addr_t pd_dma; + struct port_database_24xx *pd; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + + if (!vha->hw->flags.fw_started) + goto done; + + pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); + if (pd == NULL) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate port database structure.\n"); + goto done_free_sp; + } + memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); + + memset(&mc, 0, sizeof(mc)); + mc.mb[0] = MBC_GET_PORT_DATABASE; + mc.mb[1] = cpu_to_le16(fcport->loop_id); + mc.mb[2] = MSW(pd_dma); + mc.mb[3] = LSW(pd_dma); + mc.mb[6] = MSW(MSD(pd_dma)); + mc.mb[7] = LSW(MSD(pd_dma)); + mc.mb[9] = cpu_to_le16(vha->vp_idx); + mc.mb[10] = cpu_to_le16((uint16_t)opt); + + rval = qla24xx_send_mb_cmd(vha, &mc); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "%s: %8phC fail\n", __func__, fcport->port_name); + goto done_free_sp; + } + + rval = __qla24xx_parse_gpdb(vha, fcport, pd); + + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n", + __func__, fcport->port_name); + +done_free_sp: + if (pd) + dma_pool_free(ha->s_dma_pool, pd, pd_dma); +done: + return rval; +} + +int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, + struct port_database_24xx *pd) +{ + int rval = QLA_SUCCESS; + uint64_t zero = 0; + + /* Check for logged in state. */ + if (pd->current_login_state != PDS_PRLI_COMPLETE && + pd->last_login_state != PDS_PRLI_COMPLETE) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "Unable to verify login-state (%x/%x) for " + "loop_id %x.\n", pd->current_login_state, + pd->last_login_state, fcport->loop_id); + rval = QLA_FUNCTION_FAILED; + goto gpd_error_out; + } + + if (fcport->loop_id == FC_NO_LOOP_ID || + (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && + memcmp(fcport->port_name, pd->port_name, 8))) { + /* We lost the device mid way. */ + rval = QLA_NOT_LOGGED_IN; + goto gpd_error_out; + } + + /* Names are little-endian. */ + memcpy(fcport->node_name, pd->node_name, WWN_SIZE); + memcpy(fcport->port_name, pd->port_name, WWN_SIZE); + + /* Get port_id of device. */ + fcport->d_id.b.domain = pd->port_id[0]; + fcport->d_id.b.area = pd->port_id[1]; + fcport->d_id.b.al_pa = pd->port_id[2]; + fcport->d_id.b.rsvd_1 = 0; + + /* If not target must be initiator or unknown type. */ + if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) + fcport->port_type = FCT_INITIATOR; + else + fcport->port_type = FCT_TARGET; + + /* Passback COS information. */ + fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? + FC_COS_CLASS2 : FC_COS_CLASS3; + + if (pd->prli_svc_param_word_3[0] & BIT_7) { + fcport->flags |= FCF_CONF_COMP_SUPPORTED; + fcport->conf_compl_supported = 1; + } + +gpd_error_out: + return rval; +} + +/* + * qla24xx_gidlist__wait + * NOTE: don't call this routine from DPC thread. 
+ */ +int qla24xx_gidlist_wait(struct scsi_qla_host *vha, + void *id_list, dma_addr_t id_list_dma, uint16_t *entries) +{ + int rval = QLA_FUNCTION_FAILED; + mbx_cmd_t mc; + + if (!vha->hw->flags.fw_started) + goto done; + + memset(&mc, 0, sizeof(mc)); + mc.mb[0] = MBC_GET_ID_LIST; + mc.mb[2] = MSW(id_list_dma); + mc.mb[3] = LSW(id_list_dma); + mc.mb[6] = MSW(MSD(id_list_dma)); + mc.mb[7] = LSW(MSD(id_list_dma)); + mc.mb[8] = 0; + mc.mb[9] = cpu_to_le16(vha->vp_idx); + + rval = qla24xx_send_mb_cmd(vha, &mc); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "%s: fail\n", __func__); + } else { + *entries = mc.mb[1]; + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "%s: done\n", __func__); + } +done: + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 563116188c43..38bdcd325fa4 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1238,7 +1238,7 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id, } /* Get list of logged in devices */ - rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries); + rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries); if (rc != QLA_SUCCESS) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, "qla_target(%d): get_id_list() failed: %x\n", @@ -5648,7 +5648,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, fcport->loop_id = loop_id; - rc = qla2x00_get_port_database(vha, fcport, 0); + rc = qla24xx_gpdb_wait(vha, fcport, 0); if (rc != QLA_SUCCESS) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, "qla_target(%d): Failed to retrieve fcport " -- cgit v1.2.3 From c423437e3ff41b8ca551ab6621baf11538dbfe9d Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Wed, 15 Mar 2017 09:48:53 -0700 Subject: qla2xxx: Add DebugFS node to display Port Database Signed-off-by: Himanshu Madhani Signed-off-by: Giridhar Malavali Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_def.h | 2 + drivers/scsi/qla2xxx/qla_dfs.c | 92 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 90 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index ae38b7a789b1..089480280558 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3300,6 +3300,8 @@ struct qlt_hw_data { uint8_t tgt_node_name[WWN_SIZE]; struct dentry *dfs_tgt_sess; + struct dentry *dfs_tgt_port_database; + struct list_head q_full_list; uint32_t num_pend_cmds; uint32_t num_qfull_cmds_alloc; diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 3b35905619b0..989e17b0758c 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -19,11 +19,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused) struct qla_hw_data *ha = vha->hw; unsigned long flags; struct fc_port *sess = NULL; - struct qla_tgt *tgt= vha->vha_tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - seq_printf(s, "%s\n",vha->host_str); + seq_printf(s, "%s\n", vha->host_str); if (tgt) { - seq_printf(s, "Port ID Port Name Handle\n"); + seq_puts(s, "Port ID Port Name Handle\n"); spin_lock_irqsave(&ha->tgt.sess_lock, flags); list_for_each_entry(sess, &vha->vp_fcports, list) @@ -44,7 +44,6 @@ qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file) return single_open(file, qla2x00_dfs_tgt_sess_show, vha); } - static const struct file_operations dfs_tgt_sess_ops = { .open = qla2x00_dfs_tgt_sess_open, .read = seq_read, @@ -52,6 +51,78 @@ static 
const struct file_operations dfs_tgt_sess_ops = { .release = single_release, }; +static int +qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) +{ + scsi_qla_host_t *vha = s->private; + struct qla_hw_data *ha = vha->hw; + struct gid_list_info *gid_list; + dma_addr_t gid_list_dma; + fc_port_t fc_port; + char *id_iter; + int rc, i; + uint16_t entries, loop_id; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + + seq_printf(s, "%s\n", vha->host_str); + if (tgt) { + gid_list = dma_alloc_coherent(&ha->pdev->dev, + qla2x00_gid_list_size(ha), + &gid_list_dma, GFP_KERNEL); + if (!gid_list) { + ql_dbg(ql_dbg_user, vha, 0x705c, + "DMA allocation failed for %u\n", + qla2x00_gid_list_size(ha)); + return 0; + } + + rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, + &entries); + if (rc != QLA_SUCCESS) + goto out_free_id_list; + + id_iter = (char *)gid_list; + + seq_puts(s, "Port Name Port ID Loop ID\n"); + + for (i = 0; i < entries; i++) { + struct gid_list_info *gid = + (struct gid_list_info *)id_iter; + loop_id = le16_to_cpu(gid->loop_id); + memset(&fc_port, 0, sizeof(fc_port_t)); + + fc_port.loop_id = loop_id; + + rc = qla24xx_gpdb_wait(vha, &fc_port, 0); + seq_printf(s, "%8phC %02x%02x%02x %d\n", + fc_port.port_name, fc_port.d_id.b.domain, + fc_port.d_id.b.area, fc_port.d_id.b.al_pa, + fc_port.loop_id); + id_iter += ha->gid_list_info_size; + } +out_free_id_list: + dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), + gid_list, gid_list_dma); + } + + return 0; +} + +static int +qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file) +{ + scsi_qla_host_t *vha = inode->i_private; + + return single_open(file, qla2x00_dfs_tgt_port_database_show, vha); +} + +static const struct file_operations dfs_tgt_port_database_ops = { + .open = qla2x00_dfs_tgt_port_database_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static int qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) { @@ -296,6 +367,14 @@ create_nodes: goto out; } + ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database", + S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops); + if (!ha->tgt.dfs_tgt_port_database) { + ql_log(ql_log_warn, vha, 0xffff, + "Unable to create debugFS tgt_port_database node.\n"); + goto out; + } + ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, &dfs_fce_ops); if (!ha->dfs_fce) { @@ -326,6 +405,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha) ha->tgt.dfs_tgt_sess = NULL; } + if (ha->tgt.dfs_tgt_port_database) { + debugfs_remove(ha->tgt.dfs_tgt_port_database); + ha->tgt.dfs_tgt_port_database = NULL; + } + if (ha->dfs_fw_resource_cnt) { debugfs_remove(ha->dfs_fw_resource_cnt); ha->dfs_fw_resource_cnt = NULL; -- cgit v1.2.3 From 482c9dc79204bb83c3433a59680c787a0b98c000 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Wed, 15 Mar 2017 09:48:54 -0700 Subject: qla2xxx: Change scsi host lookup method. For target mode, when new scsi command arrive, driver first performs a look up of the SCSI Host. The current look up method is based on the ALPA portion of the NPort ID. For Cisco switch, the ALPA can not be used as the index. Instead, the new search method is based on the full value of the Nport_ID via btree lib. 
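For reference, a minimal sketch of the lookup described above, assuming the btree helpers from <linux/btree.h> that the patch below pulls in (the wrapper name here is purely illustrative):

    #include <linux/btree.h>

    /*
     * Illustrative only: pack the 24-bit N_Port ID (domain, area, al_pa)
     * into a 32-bit key and use it to find the owning host in the
     * tgt.host_map btree, mirroring what qlt_find_host_by_d_id() does in
     * the hunk further down.
     */
    static struct scsi_qla_host *sketch_find_host_by_d_id(struct qla_hw_data *ha,
                                                          const uint8_t *d_id)
    {
        uint32_t key = ((uint32_t)d_id[0] << 16) |
                       ((uint32_t)d_id[1] << 8) |
                        (uint32_t)d_id[2];

        return btree_lookup32(&ha->tgt.host_map, key);
    }

Insertion uses the same key (vha->d_id.b24) via btree_insert32() when the port ID is acquired, so the lookup no longer depends on the ALPA byte being unique.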
Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/Kconfig | 1 + drivers/scsi/qla2xxx/qla_def.h | 2 + drivers/scsi/qla2xxx/qla_gbl.h | 2 + drivers/scsi/qla2xxx/qla_init.c | 14 +++--- drivers/scsi/qla2xxx/qla_mbx.c | 28 ++++-------- drivers/scsi/qla2xxx/qla_os.c | 1 + drivers/scsi/qla2xxx/qla_target.c | 92 +++++++++++++++++++++++++++++++++------ 7 files changed, 100 insertions(+), 40 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 67c0d5aa3212..de952935b5d2 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -3,6 +3,7 @@ config SCSI_QLA_FC depends on PCI && SCSI depends on SCSI_FC_ATTRS select FW_LOADER + select BTREE ---help--- This qla2xxx driver supports all QLogic Fibre Channel PCI and PCIe host adapters. diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 089480280558..9251918773b1 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -3311,6 +3312,7 @@ struct qlt_hw_data { spinlock_t sess_lock; int rspq_vector_cpuid; spinlock_t atio_lock ____cacheline_aligned; + struct btree_head32 host_map; }; #define MAX_QFULL_CMDS_ALLOC 8192 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 323b912b47f7..5b2451745e9f 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -854,5 +854,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *, uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **); void qla24xx_delete_sess_fn(struct work_struct *); void qlt_unknown_atio_work_fn(struct work_struct *); +void qlt_update_host_map(struct scsi_qla_host *, port_id_t); +void qlt_remove_target_resources(struct qla_hw_data *); #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index b1bfa63f7d4e..b0f6ad3020d3 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3340,8 +3340,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) uint8_t domain; char connect_type[22]; struct qla_hw_data *ha = vha->hw; - unsigned long flags; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + port_id_t id; /* Get host addresses. */ rval = qla2x00_get_adapter_id(vha, @@ -3419,13 +3419,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) /* Save Host port and loop ID. 
*/ /* byte order - Big Endian */ - vha->d_id.b.domain = domain; - vha->d_id.b.area = area; - vha->d_id.b.al_pa = al_pa; - - spin_lock_irqsave(&ha->vport_slock, flags); - qlt_update_vp_map(vha, SET_AL_PA); - spin_unlock_irqrestore(&ha->vport_slock, flags); + id.b.domain = domain; + id.b.area = area; + id.b.al_pa = al_pa; + id.b.rsvd_1 = 0; + qlt_update_host_map(vha, id); if (!vha->flags.init_done) ql_log(ql_log_info, vha, 0x2010, diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index e40ed570d3c1..53d9579acc74 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3623,6 +3623,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, scsi_qla_host_t *vp = NULL; unsigned long flags; int found; + port_id_t id; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, "Entered %s.\n", __func__); @@ -3630,6 +3631,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, if (rptid_entry->entry_status != 0) return; + id.b.domain = rptid_entry->port_id[2]; + id.b.area = rptid_entry->port_id[1]; + id.b.al_pa = rptid_entry->port_id[0]; + id.b.rsvd_1 = 0; + if (rptid_entry->format == 0) { /* loop */ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7, @@ -3641,13 +3647,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); - vha->d_id.b.domain = rptid_entry->port_id[2]; - vha->d_id.b.area = rptid_entry->port_id[1]; - vha->d_id.b.al_pa = rptid_entry->port_id[0]; - - spin_lock_irqsave(&ha->vport_slock, flags); - qlt_update_vp_map(vha, SET_AL_PA); - spin_unlock_irqrestore(&ha->vport_slock, flags); + qlt_update_host_map(vha, id); } else if (rptid_entry->format == 1) { /* fabric */ @@ -3673,12 +3673,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, WWN_SIZE); } - vha->d_id.b.domain = rptid_entry->port_id[2]; - vha->d_id.b.area = rptid_entry->port_id[1]; - vha->d_id.b.al_pa = rptid_entry->port_id[0]; - spin_lock_irqsave(&ha->vport_slock, flags); - qlt_update_vp_map(vha, SET_AL_PA); - spin_unlock_irqrestore(&ha->vport_slock, flags); + qlt_update_host_map(vha, id); } fc_host_port_name(vha->host) = @@ -3714,12 +3709,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, if (!found) return; - vp->d_id.b.domain = rptid_entry->port_id[2]; - vp->d_id.b.area = rptid_entry->port_id[1]; - vp->d_id.b.al_pa = rptid_entry->port_id[0]; - spin_lock_irqsave(&ha->vport_slock, flags); - qlt_update_vp_map(vp, SET_AL_PA); - spin_unlock_irqrestore(&ha->vport_slock, flags); + qlt_update_host_map(vp, id); /* * Cannot configure here as we are still sitting on the diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 54d4e802bde0..344faf59783d 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3469,6 +3469,7 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_free_sysfs_attr(base_vha, true); fc_remove_host(base_vha->host); + qlt_remove_target_resources(ha); scsi_remove_host(base_vha->host); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 38bdcd325fa4..7278e046bf87 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -187,21 +187,23 @@ static inline struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, uint8_t *d_id) { - struct qla_hw_data *ha = vha->hw; - uint8_t vp_idx; - - if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0])) - return NULL; + struct scsi_qla_host *host; + uint32_t key = 0; - if (vha->d_id.b.al_pa == d_id[2]) + if 
((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) && + (vha->d_id.b.al_pa == d_id[2])) return vha; - BUG_ON(ha->tgt.tgt_vp_map == NULL); - vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx; - if (likely(test_bit(vp_idx, ha->vp_idx_map))) - return ha->tgt.tgt_vp_map[vp_idx].vha; + key = (uint32_t)d_id[0] << 16; + key |= (uint32_t)d_id[1] << 8; + key |= (uint32_t)d_id[2]; - return NULL; + host = btree_lookup32(&vha->hw->tgt.host_map, key); + if (!host) + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "Unable to find host %06x\n", key); + + return host; } static inline @@ -6040,6 +6042,17 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) return 0; } +void qlt_remove_target_resources(struct qla_hw_data *ha) +{ + struct scsi_qla_host *node; + u32 key = 0; + + btree_for_each_safe32(&ha->tgt.host_map, key, node) + btree_remove32(&ha->tgt.host_map, key); + + btree_destroy32(&ha->tgt.host_map); +} + static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, unsigned char *b) { @@ -6693,6 +6706,8 @@ qlt_modify_vp_config(struct scsi_qla_host *vha, void qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) { + int rc; + if (!QLA_TGT_MODE_ENABLED()) return; @@ -6712,6 +6727,13 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) qlt_unknown_atio_work_fn); qlt_clear_mode(base_vha); + + rc = btree_init32(&ha->tgt.host_map); + if (rc) + ql_log(ql_log_info, base_vha, 0xffff, + "Unable to initialize ha->host_map btree\n"); + + qlt_update_vp_map(base_vha, SET_VP_IDX); } irqreturn_t @@ -6820,25 +6842,69 @@ qlt_mem_free(struct qla_hw_data *ha) void qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) { + void *slot; + u32 key; + int rc; + if (!QLA_TGT_MODE_ENABLED()) return; + key = vha->d_id.b24; + switch (cmd) { case SET_VP_IDX: vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; break; case SET_AL_PA: - vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx; + slot = btree_lookup32(&vha->hw->tgt.host_map, key); + if (!slot) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "Save vha in host_map %p %06x\n", vha, key); + rc = btree_insert32(&vha->hw->tgt.host_map, + key, vha, GFP_ATOMIC); + if (rc) + ql_log(ql_log_info, vha, 0xffff, + "Unable to insert s_id into host_map: %06x\n", + key); + return; + } + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "replace existing vha in host_map %p %06x\n", vha, key); + btree_update32(&vha->hw->tgt.host_map, key, vha); break; case RESET_VP_IDX: vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; break; case RESET_AL_PA: - vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "clear vha in host_map %p %06x\n", vha, key); + slot = btree_lookup32(&vha->hw->tgt.host_map, key); + if (slot) + btree_remove32(&vha->hw->tgt.host_map, key); + vha->d_id.b24 = 0; break; } } +void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id) +{ + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + + if (!vha->d_id.b24) { + spin_lock_irqsave(&ha->vport_slock, flags); + vha->d_id = id; + qlt_update_vp_map(vha, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); + } else if (vha->d_id.b24 != id.b24) { + spin_lock_irqsave(&ha->vport_slock, flags); + qlt_update_vp_map(vha, RESET_AL_PA); + vha->d_id = id; + qlt_update_vp_map(vha, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); + } +} + static int __init qlt_parse_ini_mode(void) { if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) -- cgit v1.2.3 From ec7193e26055112bc824929fd943035f9a30b06f Mon 
Sep 17 00:00:00 2001 From: Quinn Tran Date: Wed, 15 Mar 2017 09:48:55 -0700 Subject: qla2xxx: Fix delayed response to command for loop mode/direct connect. Current driver wait for FW to be in the ready state before processing in-coming commands. For Arbitrated Loop or Point-to- Point (not switch), FW Ready state can take a while. FW will transition to ready state after all Nports have been logged in. In the mean time, certain initiators have completed the login and starts IO. Driver needs to start processing all queues if FW is already started. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_def.h | 10 ++++++++-- drivers/scsi/qla2xxx/qla_init.c | 12 ++++++++++++ drivers/scsi/qla2xxx/qla_isr.c | 14 +++++++++++++- drivers/scsi/qla2xxx/qla_mbx.c | 6 +++--- drivers/scsi/qla2xxx/qla_os.c | 21 ++++++++++++++++++++- drivers/scsi/qla2xxx/qla_target.c | 38 +++++++++++++++++++++++++------------- 6 files changed, 81 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 9251918773b1..ae119018dfaa 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3322,6 +3322,10 @@ struct qlt_hw_data { #define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */ +#define QLA_EARLY_LINKUP(_ha) \ + ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \ + _ha->flags.fw_started && !_ha->flags.fw_init_done) + /* * Qlogic host adapter specific data structure. */ @@ -3371,9 +3375,11 @@ struct qla_hw_data { uint32_t fawwpn_enabled:1; uint32_t exlogins_enabled:1; uint32_t exchoffld_enabled:1; - /* 35 bits */ + uint32_t lip_ae:1; + uint32_t n2n_ae:1; uint32_t fw_started:1; + uint32_t fw_init_done:1; } flags; /* This spinlock is used to protect "io transactions", you must @@ -3466,7 +3472,6 @@ struct qla_hw_data { #define P2P_LOOP 3 uint8_t interrupts_on; uint32_t isp_abort_cnt; - #define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532 #define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432 #define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001 @@ -3947,6 +3952,7 @@ typedef struct scsi_qla_host { struct list_head vp_fcports; /* list of fcports */ struct list_head work_list; spinlock_t work_lock; + struct work_struct iocb_work; /* Commonly used flags and state information. 
*/ struct Scsi_Host *host; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index b0f6ad3020d3..f9d2fe7b1ade 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3178,6 +3178,7 @@ next_check: } else { ql_dbg(ql_dbg_init, vha, 0x00d3, "Init Firmware -- success.\n"); + ha->flags.fw_started = 1; } return (rval); @@ -4000,6 +4001,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) atomic_set(&vha->loop_state, LOOP_READY); ql_dbg(ql_dbg_disc, vha, 0x2069, "LOOP READY.\n"); + ha->flags.fw_init_done = 1; /* * Process any ATIO queue entries that came in @@ -5491,6 +5493,11 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) if (!(IS_P3P_TYPE(ha))) ha->isp_ops->reset_chip(vha); + ha->flags.n2n_ae = 0; + ha->flags.lip_ae = 0; + ha->current_topology = 0; + ha->flags.fw_started = 0; + ha->flags.fw_init_done = 0; ha->chip_reset++; atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); @@ -6767,6 +6774,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) return; if (!ha->fw_major_version) return; + if (!ha->flags.fw_started) + return; ret = qla2x00_stop_firmware(vha); for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && @@ -6780,6 +6789,9 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) "Attempting retry of stop-firmware command.\n"); ret = qla2x00_stop_firmware(vha); } + + ha->flags.fw_started = 0; + ha->flags.fw_init_done = 0; } int diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 3953c8d6af69..3203367a4f42 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -708,6 +708,8 @@ skip_rio: "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); ha->isp_ops->fw_dump(vha, 1); + ha->flags.fw_init_done = 0; + ha->flags.fw_started = 0; if (IS_FWI2_CAPABLE(ha)) { if (mb[1] == 0 && mb[2] == 0) { @@ -761,6 +763,9 @@ skip_rio: break; case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ + ha->flags.lip_ae = 1; + ha->flags.n2n_ae = 0; + ql_dbg(ql_dbg_async, vha, 0x5009, "LIP occurred (%x).\n", mb[1]); @@ -797,6 +802,10 @@ skip_rio: break; case MBA_LOOP_DOWN: /* Loop Down Event */ + ha->flags.n2n_ae = 0; + ha->flags.lip_ae = 0; + ha->current_topology = 0; + mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) ? RD_REG_WORD(®24->mailbox4) : 0; mbx = (IS_P3P_TYPE(ha)) ? 
RD_REG_WORD(®82->mailbox_out[4]) @@ -866,6 +875,9 @@ skip_rio: /* case MBA_DCBX_COMPLETE: */ case MBA_POINT_TO_POINT: /* Point-to-Point */ + ha->flags.lip_ae = 0; + ha->flags.n2n_ae = 1; + if (IS_QLA2100(ha)) break; @@ -2706,7 +2718,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, struct sts_entry_24xx *pkt; struct qla_hw_data *ha = vha->hw; - if (!vha->flags.online) + if (!ha->flags.fw_started) return; while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 53d9579acc74..a113ab3592a7 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3638,11 +3638,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, if (rptid_entry->format == 0) { /* loop */ - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7, + ql_dbg(ql_dbg_async, vha, 0x10b7, "Format 0 : Number of VPs setup %d, number of " "VPs acquired %d.\n", rptid_entry->vp_setup, rptid_entry->vp_acquired); - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8, + ql_dbg(ql_dbg_async, vha, 0x10b8, "Primary port id %02x%02x%02x.\n", rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); @@ -3651,7 +3651,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, } else if (rptid_entry->format == 1) { /* fabric */ - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9, + ql_dbg(ql_dbg_async, vha, 0x10b9, "Format 1: VP[%d] enabled - status %d - with " "port id %02x%02x%02x.\n", rptid_entry->vp_idx, rptid_entry->vp_status, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 344faf59783d..41d5b09f7326 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2560,6 +2560,20 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) return atomic_read(&vha->loop_state) == LOOP_READY; } +static void qla2x00_iocb_work_fn(struct work_struct *work) +{ + struct scsi_qla_host *vha = container_of(work, + struct scsi_qla_host, iocb_work); + int cnt = 0; + + while (!list_empty(&vha->work_list)) { + qla2x00_do_work(vha); + cnt++; + if (cnt > 10) + break; + } +} + /* * PCI driver interface */ @@ -3078,6 +3092,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) */ qla2xxx_wake_dpc(base_vha); + INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { @@ -4321,7 +4336,11 @@ qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) spin_lock_irqsave(&vha->work_lock, flags); list_add_tail(&e->list, &vha->work_list); spin_unlock_irqrestore(&vha->work_lock, flags); - qla2xxx_wake_dpc(vha); + + if (QLA_EARLY_LINKUP(vha->hw)) + schedule_work(&vha->iocb_work); + else + qla2xxx_wake_dpc(vha); return QLA_SUCCESS; } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 7278e046bf87..0e03ca2ab3e5 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -638,6 +638,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, break; case SRB_NACK_PRLI: fcport->fw_login_state = DSC_LS_PRLI_PEND; + fcport->deleted = 0; c = "PRLI"; break; case SRB_NACK_LOGO: @@ -1576,6 +1577,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, request_t *pkt; struct nack_to_isp *nack; + if (!ha->flags.fw_started) + return; + ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); /* Send marker if required */ @@ -3053,7 +3057,7 @@ int 
qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, else vha->tgt_counters.core_qla_que_buf++; - if (!vha->flags.online || cmd->reset_count != ha->chip_reset) { + if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) { /* * Either the port is not online or this request was from * previous life, just abort the processing. @@ -3194,7 +3198,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) spin_lock_irqsave(&ha->hardware_lock, flags); - if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) || + if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) || (cmd->sess && cmd->sess->deleted)) { /* * Either the port is not online or this request was from @@ -3372,7 +3376,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, "Sending TERM ELS CTIO (ha=%p)\n", ha); - pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); + pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); if (pkt == NULL) { ql_dbg(ql_dbg_tgt, vha, 0xe080, "qla_target(%d): %s failed: unable to allocate " @@ -4697,7 +4701,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, } if (sess != NULL) { - if (sess->fw_login_state == DSC_LS_PLOGI_PEND) { + if (sess->fw_login_state != DSC_LS_PLOGI_PEND && + sess->fw_login_state != DSC_LS_PLOGI_COMP) { /* * Impatient initiator sent PRLI before last * PLOGI could finish. Will force him to re-try, @@ -4736,15 +4741,23 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, /* Make session global (not used in fabric mode) */ if (ha->current_topology != ISP_CFG_F) { - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); - qla2xxx_wake_dpc(vha); + if (sess) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post nack\n", + __func__, __LINE__, sess->port_name); + qla24xx_post_nack_work(vha, sess, iocb, + SRB_NACK_PRLI); + res = 0; + } else { + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } } else { if (sess) { ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post nack\n", - __func__, __LINE__, sess->port_name); - + "%s %d %8phC post nack\n", + __func__, __LINE__, sess->port_name); qla24xx_post_nack_work(vha, sess, iocb, SRB_NACK_PRLI); res = 0; @@ -4752,7 +4765,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, } break; - case ELS_TPRLO: if (le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { @@ -5222,7 +5234,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, unsigned long flags; if (unlikely(tgt == NULL)) { - ql_dbg(ql_dbg_io, vha, 0x3064, + ql_dbg(ql_dbg_tgt, vha, 0x3064, "ATIO pkt, but no tgt (ha %p)", ha); return; } @@ -6359,7 +6371,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) struct atio_from_isp *pkt; int cnt, i; - if (!vha->flags.online) + if (!ha->flags.fw_started) return; while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || -- cgit v1.2.3 From 6c611d18f386d37cce3afbd921568e2a895bd86e Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Wed, 15 Mar 2017 09:48:56 -0700 Subject: qla2xxx: Update driver version to 9.00.00.00-k Signed-off-by: Himanshu Madhani signed-off-by: Giridhar Malavali Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_version.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 3cb1964b7786..45bc84e8e3bf 100644 --- 
a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.07.00.38-k" +#define QLA2XXX_VERSION "9.00.00.00-k" -#define QLA_DRIVER_MAJOR_VER 8 -#define QLA_DRIVER_MINOR_VER 7 +#define QLA_DRIVER_MAJOR_VER 9 +#define QLA_DRIVER_MINOR_VER 0 #define QLA_DRIVER_PATCH_VER 0 #define QLA_DRIVER_BETA_VER 0 -- cgit v1.2.3 From 6985bd5e21901c2d2bfc924b114887e20c002901 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 14 Mar 2017 22:05:20 +0100 Subject: iio: imu: st_lsm6dsx: fix FIFO_CTRL2 overwrite during watermark configuration Fixes: 290a6ce11d93 (iio: imu: add support to lsm6dsx driver) Signed-off-by: Lorenzo Bianconi Signed-off-by: Jonathan Cameron --- drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index 78532ce07449..81b572d7699a 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c @@ -193,8 +193,8 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark) if (err < 0) goto out; - fifo_watermark = ((data & ~ST_LSM6DSX_FIFO_TH_MASK) << 8) | - (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK); + fifo_watermark = ((data << 8) & ~ST_LSM6DSX_FIFO_TH_MASK) | + (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK); wdata = cpu_to_le16(fifo_watermark); err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_THL_ADDR, -- cgit v1.2.3 From fe686babf4cf62b9b214b99847c735baa35a9fa2 Mon Sep 17 00:00:00 2001 From: Philipp Tomsich Date: Wed, 15 Mar 2017 12:23:58 +0100 Subject: clk: sunxi-ng: Fix div/mult settings for osc12M on A64 The mult/div for osc12M was previously backwards (giving a 48M rate for osc12M). Fix it. Signed-off-by: Philipp Tomsich Tested-by: Christoph Muellner Signed-off-by: Maxime Ripard --- drivers/clk/sunxi-ng/ccu-sun50i-a64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c index e3c084cc6da5..f54114c607df 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c @@ -566,7 +566,7 @@ static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu", 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT); /* Fixed Factor clocks */ -static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 1, 2, 0); +static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0); /* We hardcode the divider to 4 for now */ static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", -- cgit v1.2.3 From 07f5ab6002a4f0b633f3495157166f9f6180871b Mon Sep 17 00:00:00 2001 From: Vaibhav Jain Date: Thu, 23 Feb 2017 08:57:26 +0530 Subject: cxl: Route eeh events to all slices for pci_channel_io_perm_failure state Fix a boundary condition where in some cases an eeh event with state == pci_channel_io_perm_failure wont be passed on to a driver attached to the virtual PCI device associated with a slice. This will happen in case the slice just before (n-1) doesn't have any vPHB bus associated with it, that results in an early return from cxl_pci_error_detected() callback. With state == pci_channel_io_perm_failure, the adapter will be removed irrespective of the return value of cxl_vphb_error_detected(). So we now always return PCI_ERS_RESULT_DISCONNECTED for this case i.e even if the AFU isn't using a vPHB (currently returns PCI_ERS_RESULT_NONE). 
Fixes: e4f5fc001a6("cxl: Do not create vPHB if there are no AFU configuration records") Signed-off-by: Vaibhav Jain Reviewed-by: Matthew R. Ochs Reviewed-by: Andrew Donnellan Acked-by: Frederic Barrat Signed-off-by: Michael Ellerman --- drivers/misc/cxl/pci.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 91f645992c94..b27ea98b781f 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1792,15 +1792,14 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, /* If we're permanently dead, give up. */ if (state == pci_channel_io_perm_failure) { - /* Tell the AFU drivers; but we don't care what they - * say, we're going away. - */ for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; - /* Only participate in EEH if we are on a virtual PHB */ - if (afu->phb == NULL) - return PCI_ERS_RESULT_NONE; - cxl_vphb_error_detected(afu, state); + /* + * Tell the AFU drivers; but we don't care what they + * say, we're going away. + */ + if (afu->phb != NULL) + cxl_vphb_error_detected(afu, state); } return PCI_ERS_RESULT_DISCONNECT; } -- cgit v1.2.3 From b467e08a15563dede0d37d3233baa24fb97a7310 Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Sat, 18 Mar 2017 04:19:43 +0800 Subject: clk: sunxi-ng: fix recalc_rate formula of NKMP clocks In commit e66f81bbd746 ("clk: sunxi-ng: Implement factors offsets"), the final formula of NKMP clocks' recalc_rate is refactored; however, the refactored formula broke the calculation due to some C language operand priority problem -- the priority of operand >> is lower than * and /, makes the formula being parsed as "(parent_rate * n * k) >> (p / m)", but it should be "(parent_rate * n * k >> p) / m". Add the pair of parentheses to fix up this issue. This pair of parentheses used to exist in the old formula. Fixes: e66f81bbd746 ("clk: sunxi-ng: Implement factors offsets") Signed-off-by: Icenowy Zheng Signed-off-by: Maxime Ripard --- drivers/clk/sunxi-ng/ccu_nkmp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c index a2b40a000157..488055ed944f 100644 --- a/drivers/clk/sunxi-ng/ccu_nkmp.c +++ b/drivers/clk/sunxi-ng/ccu_nkmp.c @@ -107,7 +107,7 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw, p = reg >> nkmp->p.shift; p &= (1 << nkmp->p.width) - 1; - return parent_rate * n * k >> p / m; + return (parent_rate * n * k >> p) / m; } static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate, -- cgit v1.2.3 From 6be3b6cce1e225f189b68b4e84fc711d19b4277b Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Mon, 13 Mar 2017 15:49:03 -0700 Subject: ath10k: fix incorrect wlan_mac_base in qca6174_regs In the 'commit ebee76f7fa46 ("ath10k: allow setting coverage class")', it inherits the design and the address offset from ath9k, but the address is not applicable to QCA6174, which leads to a random crash while doing the resume() operation, since the set_coverage_class.ops will be called from ieee80211_reconfig() when resume() (if the wow is not configured). Fix the incorrect address offset here to avoid the random crash. Verified on QCA6174/hw3.0 with firmware WLAN.RM.4.4-00022-QCARMSWPZ-2. kvalo: this also seems to fix a regression with firmware restart. 
Fixes: ebee76f7fa46 ("ath10k: allow setting coverage class") Cc: # v4.10 Signed-off-by: Ryan Hsu Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/hw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 33fb26833cd0..d9f37ee4bfdd 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -51,7 +51,7 @@ const struct ath10k_hw_regs qca6174_regs = { .rtc_soc_base_address = 0x00000800, .rtc_wmac_base_address = 0x00001000, .soc_core_base_address = 0x0003a000, - .wlan_mac_base_address = 0x00020000, + .wlan_mac_base_address = 0x00010000, .ce_wrapper_base_address = 0x00034000, .ce0_base_address = 0x00034400, .ce1_base_address = 0x00034800, -- cgit v1.2.3 From 49e190ec332e96ba28f24b86f7a92f614707819b Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Sat, 28 Jan 2017 05:01:51 +0800 Subject: PTP: fix ptr_ret.cocci warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/ptp/ptp_kvm.c:229:1-3: WARNING: PTR_ERR_OR_ZERO can be used Use PTR_ERR_OR_ZERO rather than if(IS_ERR(...)) + PTR_ERR Generated by: scripts/coccinelle/api/ptr_ret.cocci CC: Marcelo Tosatti Signed-off-by: Fengguang Wu Signed-off-by: Radim Krčmář --- drivers/ptp/ptp_kvm.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c index 09b4df74291e..bb865695d7a6 100644 --- a/drivers/ptp/ptp_kvm.c +++ b/drivers/ptp/ptp_kvm.c @@ -193,10 +193,7 @@ static int __init ptp_kvm_init(void) kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL); - if (IS_ERR(kvm_ptp_clock.ptp_clock)) - return PTR_ERR(kvm_ptp_clock.ptp_clock); - - return 0; + return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock); } module_init(ptp_kvm_init); -- cgit v1.2.3 From c3104aae5d8cc443556f8613466e16737326e215 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 13 Mar 2017 17:36:25 +0100 Subject: remoteproc: qcom: fix QCOM_SMD dependencies qcom_smd_register_edge() is provided by either QCOM_SMD or RPMSG_QCOM_SMD, and if both of them are disabled, it does nothing. The check for the PIL drivers however only checks for QCOM_SMD, so it breaks with QCOM_SMD=n && RPMSG_QCOM_SMD=m: drivers/remoteproc/built-in.o: In function `smd_subdev_remove': qcom_wcnss_iris.c:(.text+0x231c): undefined reference to `qcom_smd_unregister_edge' drivers/remoteproc/built-in.o: In function `smd_subdev_probe': qcom_wcnss_iris.c:(.text+0x2344): undefined reference to `qcom_smd_register_edge' drivers/remoteproc/built-in.o: In function `smd_subdev_probe': qcom_q6v5_pil.c:(.text+0x3538): undefined reference to `qcom_smd_register_edge' qcom_q6v5_pil.c:(.text+0x3538): relocation truncated to fit: R_AARCH64_CALL26 against undefined symbol `qcom_smd_register_edge' This clarifies the Kconfig dependency. 
Fixes: 4b48921a8f74 ("remoteproc: qcom: Use common SMD edge handler") Signed-off-by: Arnd Bergmann Signed-off-by: Bjorn Andersson --- drivers/remoteproc/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 65f86bc24c07..1dc43fc5f65f 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -76,7 +76,7 @@ config QCOM_ADSP_PIL depends on OF && ARCH_QCOM depends on REMOTEPROC depends on QCOM_SMEM - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) + depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) select MFD_SYSCON select QCOM_MDT_LOADER select QCOM_RPROC_COMMON @@ -93,7 +93,7 @@ config QCOM_Q6V5_PIL depends on OF && ARCH_QCOM depends on QCOM_SMEM depends on REMOTEPROC - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) + depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) select MFD_SYSCON select QCOM_RPROC_COMMON select QCOM_SCM @@ -104,7 +104,7 @@ config QCOM_Q6V5_PIL config QCOM_WCNSS_PIL tristate "Qualcomm WCNSS Peripheral Image Loader" depends on OF && ARCH_QCOM - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) + depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) depends on QCOM_SMEM depends on REMOTEPROC select QCOM_MDT_LOADER -- cgit v1.2.3 From ac7ce78ba036c0f9952d9706b1941c7d80c78680 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 14 Feb 2017 10:46:20 +0300 Subject: drm/exynos/decon5433: & vs | typo "&" was obviously intended instead of "|". The original condition is always true. Fixes: b93c2e8b5d9d ("drm/exynos/decon5433: configure sysreg in case of hardware trigger") Signed-off-by: Dan Carpenter Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 0fd6f7a18364..cca32a4fdab3 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -678,7 +678,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev) ctx->out_type |= IFTYPE_I80; } - if (ctx->out_type | I80_HW_TRG) { + if (ctx->out_type & I80_HW_TRG) { ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, "samsung,disp-sysreg"); if (IS_ERR(ctx->sysreg)) { -- cgit v1.2.3 From 6bdc92ee4980ca10171a8de338fad612f00bb48f Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Sat, 11 Mar 2017 20:04:16 +0200 Subject: drm/exynos: Remove support for Exynos4415 (SoC not supported anymore) Support for Exynos4415 is going away because there are no internal nor external users. Since commit 46dcf0ff0de3 ("ARM: dts: exynos: Remove exynos4415.dtsi"), the platform cannot be instantiated so remove also the drivers. 
Signed-off-by: Krzysztof Kozlowski Acked-by: Kukjin Kim Acked-by: Rob Herring Signed-off-by: Inki Dae --- .../devicetree/bindings/display/exynos/exynos_dsim.txt | 1 - .../bindings/display/exynos/samsung-fimd.txt | 1 - drivers/gpu/drm/exynos/exynos_drm_dsi.c | 15 +-------------- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 18 ++---------------- 4 files changed, 3 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt index a78265993665..ca5204b3bc21 100644 --- a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt +++ b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt @@ -4,7 +4,6 @@ Required properties: - compatible: value should be one of the following "samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */ "samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */ - "samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */ "samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */ "samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */ "samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */ diff --git a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt index 18645e0228b0..5837402c3ade 100644 --- a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt +++ b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt @@ -11,7 +11,6 @@ Required properties: "samsung,s5pv210-fimd"; /* for S5PV210 SoC */ "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */ "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */ - "samsung,exynos4415-fimd"; /* for Exynos4415 SoC */ "samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */ "samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 812e2ec0761d..ef6f9c6de098 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -86,7 +86,7 @@ #define DSIM_SYNC_INFORM (1 << 27) #define DSIM_EOT_DISABLE (1 << 28) #define DSIM_MFLUSH_VS (1 << 29) -/* This flag is valid only for exynos3250/3472/4415/5260/5430 */ +/* This flag is valid only for exynos3250/3472/5260/5430 */ #define DSIM_CLKLANE_STOP (1 << 30) /* DSIM_ESCMODE */ @@ -473,17 +473,6 @@ static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = { .reg_values = reg_values, }; -static const struct exynos_dsi_driver_data exynos4415_dsi_driver_data = { - .reg_ofs = exynos_reg_ofs, - .plltmr_reg = 0x58, - .has_clklane_stop = 1, - .num_clks = 2, - .max_freq = 1000, - .wait_for_reset = 1, - .num_bits_resol = 11, - .reg_values = reg_values, -}; - static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = { .reg_ofs = exynos_reg_ofs, .plltmr_reg = 0x58, @@ -521,8 +510,6 @@ static const struct of_device_id exynos_dsi_of_match[] = { .data = &exynos3_dsi_driver_data }, { .compatible = "samsung,exynos4210-mipi-dsi", .data = &exynos4_dsi_driver_data }, - { .compatible = "samsung,exynos4415-mipi-dsi", - .data = &exynos4415_dsi_driver_data }, { .compatible = "samsung,exynos5410-mipi-dsi", .data = &exynos5_dsi_driver_data }, { .compatible = "samsung,exynos5422-mipi-dsi", diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index a9fa444c6053..661b9fe049e2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ 
b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -71,10 +71,10 @@ #define TRIGCON 0x1A4 #define TRGMODE_ENABLE (1 << 0) #define SWTRGCMD_ENABLE (1 << 1) -/* Exynos3250, 3472, 4415, 5260 5410, 5420 and 5422 only supported. */ +/* Exynos3250, 3472, 5260 5410, 5420 and 5422 only supported. */ #define HWTRGEN_ENABLE (1 << 3) #define HWTRGMASK_ENABLE (1 << 4) -/* Exynos3250, 3472, 4415, 5260, 5420 and 5422 only supported. */ +/* Exynos3250, 3472, 5260, 5420 and 5422 only supported. */ #define HWTRIGEN_PER_ENABLE (1 << 31) /* display mode change control register except exynos4 */ @@ -138,18 +138,6 @@ static struct fimd_driver_data exynos4_fimd_driver_data = { .has_vtsel = 1, }; -static struct fimd_driver_data exynos4415_fimd_driver_data = { - .timing_base = 0x20000, - .lcdblk_offset = 0x210, - .lcdblk_vt_shift = 10, - .lcdblk_bypass_shift = 1, - .trg_type = I80_HW_TRG, - .has_shadowcon = 1, - .has_vidoutcon = 1, - .has_vtsel = 1, - .has_trigger_per_te = 1, -}; - static struct fimd_driver_data exynos5_fimd_driver_data = { .timing_base = 0x20000, .lcdblk_offset = 0x214, @@ -210,8 +198,6 @@ static const struct of_device_id fimd_driver_dt_match[] = { .data = &exynos3_fimd_driver_data }, { .compatible = "samsung,exynos4210-fimd", .data = &exynos4_fimd_driver_data }, - { .compatible = "samsung,exynos4415-fimd", - .data = &exynos4415_fimd_driver_data }, { .compatible = "samsung,exynos5250-fimd", .data = &exynos5_fimd_driver_data }, { .compatible = "samsung,exynos5420-fimd", -- cgit v1.2.3 From a392276d1dec63e5aabe6f527c37de29a729559a Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Tue, 14 Mar 2017 09:27:56 +0100 Subject: drm/exynos: move crtc event handling to drivers callbacks CRTC event is currently send with next vblank, or instantly in case crtc is being disabled. This approach usually works, but in corner cases it can result in premature event generation. Only device driver is able to verify if the event can be sent. This patch is a first step in that direction - it moves event handling to the drivers. 
Signed-off-by: Andrzej Hajda Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 1 + drivers/gpu/drm/exynos/exynos7_drm_decon.c | 1 + drivers/gpu/drm/exynos/exynos_drm_crtc.c | 29 +++++++++++++++------------ drivers/gpu/drm/exynos/exynos_drm_crtc.h | 2 ++ drivers/gpu/drm/exynos/exynos_drm_fimd.c | 2 ++ drivers/gpu/drm/exynos/exynos_drm_vidi.c | 1 + drivers/gpu/drm/exynos/exynos_mixer.c | 1 + 7 files changed, 24 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index cca32a4fdab3..2130ccf1e036 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -378,6 +378,7 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) if (ctx->out_type & IFTYPE_I80) set_bit(BIT_WIN_UPDATED, &ctx->flags); + exynos_crtc_handle_event(crtc); } static void decon_swreset(struct decon_context *ctx) diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index f9ab19e205e2..48811806fa27 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -526,6 +526,7 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) for (i = 0; i < WINDOWS_NR; i++) decon_shadow_protect_win(ctx, i, false); + exynos_crtc_handle_event(crtc); } static void decon_init(struct decon_context *ctx) diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 5367b6664fe3..c65f4509932c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -85,16 +85,28 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - struct drm_pending_vblank_event *event; - unsigned long flags; if (exynos_crtc->ops->atomic_flush) exynos_crtc->ops->atomic_flush(exynos_crtc); +} + +static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { + .enable = exynos_drm_crtc_enable, + .disable = exynos_drm_crtc_disable, + .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, + .atomic_check = exynos_crtc_atomic_check, + .atomic_begin = exynos_crtc_atomic_begin, + .atomic_flush = exynos_crtc_atomic_flush, +}; + +void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc) +{ + struct drm_crtc *crtc = &exynos_crtc->base; + struct drm_pending_vblank_event *event = crtc->state->event; + unsigned long flags; - event = crtc->state->event; if (event) { crtc->state->event = NULL; - spin_lock_irqsave(&crtc->dev->event_lock, flags); if (drm_crtc_vblank_get(crtc) == 0) drm_crtc_arm_vblank_event(crtc, event); @@ -105,15 +117,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, } -static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { - .enable = exynos_drm_crtc_enable, - .disable = exynos_drm_crtc_disable, - .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, - .atomic_check = exynos_crtc_atomic_check, - .atomic_begin = exynos_crtc_atomic_begin, - .atomic_flush = exynos_crtc_atomic_flush, -}; - static void exynos_drm_crtc_destroy(struct drm_crtc *crtc) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h index 6a581a8af465..abd5d6ceac0c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h @@ -40,4 +40,6 @@ int 
exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, */ void exynos_drm_crtc_te_handler(struct drm_crtc *crtc); +void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc); + #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 661b9fe049e2..a3162a4566ce 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -709,6 +709,8 @@ static void fimd_atomic_flush(struct exynos_drm_crtc *crtc) for (i = 0; i < WINDOWS_NR; i++) fimd_shadow_protect_win(ctx, i, false); + + exynos_crtc_handle_event(crtc); } static void fimd_update_plane(struct exynos_drm_crtc *crtc, diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 57fe514d5c5b..5d9a62a87eec 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -170,6 +170,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = { .enable_vblank = vidi_enable_vblank, .disable_vblank = vidi_disable_vblank, .update_plane = vidi_update_plane, + .atomic_flush = exynos_crtc_handle_event, }; static void vidi_fake_vblank_timer(unsigned long arg) diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 72143ac10525..25edb635a197 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1012,6 +1012,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc) return; mixer_vsync_set_update(mixer_ctx, true); + exynos_crtc_handle_event(crtc); } static void mixer_enable(struct exynos_drm_crtc *crtc) -- cgit v1.2.3 From 73488331eb9460d14974a1e2c734f77ce8869183 Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Tue, 14 Mar 2017 09:27:57 +0100 Subject: drm/exynos/decon5433: fix vblank event handling Current implementation of event handling assumes that vblank interrupt is always called at the right time. It is not true, it can be delayed due to various reasons. As a result different races can happen. The patch fixes the issue by using hardware frame counter present in DECON to serialize vblank and commit completion events. Signed-off-by: Andrzej Hajda Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 78 ++++++++++++++++++++++++++- include/video/exynos5433_decon.h | 8 +++ 2 files changed, 85 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 2130ccf1e036..cfe6f8af849f 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -68,6 +68,8 @@ struct decon_context { unsigned long flags; unsigned long out_type; int first_win; + spinlock_t vblank_lock; + u32 frame_id; }; static const uint32_t decon_formats[] = { @@ -122,6 +124,48 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc) writel(0, ctx->addr + DECON_VIDINTCON0); } +/* return number of starts/ends of frame transmissions since reset */ +static u32 decon_get_frame_count(struct decon_context *ctx, bool end) +{ + u32 frm, pfrm, status, cnt = 2; + + /* To get consistent result repeat read until frame id is stable. + * Usually the loop will be executed once, in rare cases when the loop + * is executed at frame change time 2nd pass will be needed. 
+ */ + frm = readl(ctx->addr + DECON_CRFMID); + do { + status = readl(ctx->addr + DECON_VIDCON1); + pfrm = frm; + frm = readl(ctx->addr + DECON_CRFMID); + } while (frm != pfrm && --cnt); + + /* CRFMID is incremented on BPORCH in case of I80 and on VSYNC in case + * of RGB, it should be taken into account. + */ + if (!frm) + return 0; + + switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) { + case VIDCON1_VSTATUS_VS: + if (!(ctx->out_type & IFTYPE_I80)) + --frm; + break; + case VIDCON1_VSTATUS_BP: + --frm; + break; + case VIDCON1_I80_ACTIVE: + case VIDCON1_VSTATUS_AC: + if (end) + --frm; + break; + default: + break; + } + + return frm; +} + static void decon_setup_trigger(struct decon_context *ctx) { if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))) @@ -365,11 +409,14 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, static void decon_atomic_flush(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; + unsigned long flags; int i; if (test_bit(BIT_SUSPENDED, &ctx->flags)) return; + spin_lock_irqsave(&ctx->vblank_lock, flags); + for (i = ctx->first_win; i < WINDOWS_NR; i++) decon_shadow_protect_win(ctx, i, false); @@ -378,12 +425,18 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) if (ctx->out_type & IFTYPE_I80) set_bit(BIT_WIN_UPDATED, &ctx->flags); + + ctx->frame_id = decon_get_frame_count(ctx, true); + exynos_crtc_handle_event(crtc); + + spin_unlock_irqrestore(&ctx->vblank_lock, flags); } static void decon_swreset(struct decon_context *ctx) { unsigned int tries; + unsigned long flags; writel(0, ctx->addr + DECON_VIDCON0); for (tries = 2000; tries; --tries) { @@ -401,6 +454,10 @@ static void decon_swreset(struct decon_context *ctx) WARN(tries == 0, "failed to software reset DECON\n"); + spin_lock_irqsave(&ctx->vblank_lock, flags); + ctx->frame_id = 0; + spin_unlock_irqrestore(&ctx->vblank_lock, flags); + if (!(ctx->out_type & IFTYPE_HDMI)) return; @@ -579,6 +636,24 @@ static const struct component_ops decon_component_ops = { .unbind = decon_unbind, }; +static void decon_handle_vblank(struct decon_context *ctx) +{ + u32 frm; + + spin_lock(&ctx->vblank_lock); + + frm = decon_get_frame_count(ctx, true); + + if (frm != ctx->frame_id) { + /* handle only if incremented, take care of wrap-around */ + if ((s32)(frm - ctx->frame_id) > 0) + drm_crtc_handle_vblank(&ctx->crtc->base); + ctx->frame_id = frm; + } + + spin_unlock(&ctx->vblank_lock); +} + static irqreturn_t decon_irq_handler(int irq, void *dev_id) { struct decon_context *ctx = dev_id; @@ -599,7 +674,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F)) return IRQ_HANDLED; } - drm_crtc_handle_vblank(&ctx->crtc->base); + decon_handle_vblank(ctx); } out: @@ -672,6 +747,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev) __set_bit(BIT_SUSPENDED, &ctx->flags); ctx->dev = dev; ctx->out_type = (unsigned long)of_device_get_match_data(dev); + spin_lock_init(&ctx->vblank_lock); if (ctx->out_type & IFTYPE_HDMI) { ctx->first_win = 1; diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h index ef8e2a8ad0af..352fc0d9528f 100644 --- a/include/video/exynos5433_decon.h +++ b/include/video/exynos5433_decon.h @@ -46,6 +46,7 @@ #define DECON_FRAMEFIFO_STATUS 0x0524 #define DECON_CMU 0x1404 #define DECON_UPDATE 0x1410 +#define DECON_CRFMID 0x1414 #define DECON_UPDATE_SCHEME 0x1438 #define DECON_VIDCON1 0x2000 #define DECON_VIDCON2 0x2004 @@ -142,6 +143,13 @@ #define STANDALONE_UPDATE_F (1 << 0) /* 
DECON_VIDCON1 */ +#define VIDCON1_LINECNT_MASK (0x0fff << 16) +#define VIDCON1_I80_ACTIVE (1 << 15) +#define VIDCON1_VSTATUS_MASK (0x3 << 13) +#define VIDCON1_VSTATUS_VS (0 << 13) +#define VIDCON1_VSTATUS_BP (1 << 13) +#define VIDCON1_VSTATUS_AC (2 << 13) +#define VIDCON1_VSTATUS_FP (3 << 13) #define VIDCON1_VCLK_MASK (0x3 << 9) #define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9) #define VIDCON1_VCLK_HOLD (0x0 << 9) -- cgit v1.2.3 From f3cce673e1c11112c536fbc8e6912c5414d7141f Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Tue, 14 Mar 2017 09:27:58 +0100 Subject: drm/exynos/decon5433: signal frame done interrupt at front porch DECON in case of video mode generates interrupt by default at start of vertical back porch. As this interrupt is used to generate VBLANK events more optimal point is start of vertical front porch. Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 2 +- include/video/exynos5433_decon.h | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index cfe6f8af849f..a43d0fa0b051 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -105,7 +105,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc) if (ctx->out_type & IFTYPE_I80) val |= VIDINTCON0_FRAMEDONE; else - val |= VIDINTCON0_INTFRMEN; + val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP; writel(val, ctx->addr + DECON_VIDINTCON0); } diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h index 352fc0d9528f..6b083d327e98 100644 --- a/include/video/exynos5433_decon.h +++ b/include/video/exynos5433_decon.h @@ -127,6 +127,10 @@ /* VIDINTCON0 */ #define VIDINTCON0_FRAMEDONE (1 << 17) +#define VIDINTCON0_FRAMESEL_BP (0 << 15) +#define VIDINTCON0_FRAMESEL_VS (1 << 15) +#define VIDINTCON0_FRAMESEL_AC (2 << 15) +#define VIDINTCON0_FRAMESEL_FP (3 << 15) #define VIDINTCON0_INTFRMEN (1 << 12) #define VIDINTCON0_INTEN (1 << 0) -- cgit v1.2.3 From 82a01783252be726d76cdbbababc16540f582cec Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Tue, 14 Mar 2017 09:27:59 +0100 Subject: drm/exynos/fimd: signal frame done interrupt at front porch VBLANK interrupt should be signalled as soon as scanout ends, front porch is the best moment. Signed-off-by: Andrzej Hajda Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index a3162a4566ce..3f04d72c448d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -243,7 +243,7 @@ static int fimd_enable_vblank(struct exynos_drm_crtc *crtc) val |= VIDINTCON0_INT_FRAME; val &= ~VIDINTCON0_FRAMESEL0_MASK; - val |= VIDINTCON0_FRAMESEL0_VSYNC; + val |= VIDINTCON0_FRAMESEL0_FRONTPORCH; val &= ~VIDINTCON0_FRAMESEL1_MASK; val |= VIDINTCON0_FRAMESEL1_NONE; } -- cgit v1.2.3 From f07d9c2864d4ccf0a0b5bd2dd6491f5204eab5fe Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Tue, 14 Mar 2017 09:28:00 +0100 Subject: drm/exynos/decon5433: fix software trigger mask The patch fixes copy/paste bug introduced during code refactoring. 
Reported-by: Dan Carpenter Fixes: b93c2e8b5d9d ("drm/exynos/decon5433: configure sysreg in case of hardware trigger") Signed-off-by: Andrzej Hajda Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index a43d0fa0b051..c0e8d3302292 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -172,8 +172,8 @@ static void decon_setup_trigger(struct decon_context *ctx) return; if (!(ctx->out_type & I80_HW_TRG)) { - writel(TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN - | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN, + writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | + TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN, ctx->addr + DECON_TRIGCON); return; } -- cgit v1.2.3 From 9cdf0ed25a9b34fd82cb0eb47a8bdc47dc9f4ff5 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 14 Mar 2017 20:38:04 +0200 Subject: drm/exynos: Print kernel pointers in a restricted form Printing raw kernel pointers might reveal information which sometimes we try to hide (e.g. with Kernel Address Space Layout Randomization). Use the "%pK" format so these pointers will be hidden for unprivileged users. Signed-off-by: Krzysztof Kozlowski Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 4 ++-- drivers/gpu/drm/exynos/exynos_drm_fimc.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_gem.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_gsc.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_ipp.c | 22 +++++++++++----------- drivers/gpu/drm/exynos/exynos_drm_rotator.c | 2 +- 6 files changed, 17 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index ef6f9c6de098..c671f8e017db 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -966,7 +966,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi, bool first = !xfer->tx_done; u32 reg; - dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n", + dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n", xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done); if (length > DSI_TX_FIFO_SIZE)
ERR_PTR(ret); } - DRM_DEBUG_KMS("created file object = %p\n", obj->filp); + DRM_DEBUG_KMS("created file object = %pK\n", obj->filp); return exynos_gem; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index bef57987759d..0506b2b17ac1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev) return ret; } - DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv); + DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv); mutex_init(&ctx->lock); platform_set_drvdata(pdev, ctx); diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index 9c84ee76f18a..3edda18cc2d2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id) * e.g PAUSE state, queue buf, command control. */ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { - DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv); + DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", count++, ippdrv); mutex_lock(&ippdrv->cmd_lock); list_for_each_entry(c_node, &ippdrv->cmd_list, list) { @@ -388,7 +388,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, } property->prop_id = ret; - DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n", + DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%pK]\n", property->prop_id, property->cmd, ippdrv); /* stored property information and ippdrv in private data */ @@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev, { int i; - DRM_DEBUG_KMS("node[%p]\n", m_node); + DRM_DEBUG_KMS("node[%pK]\n", m_node); if (!m_node) { DRM_ERROR("invalid dequeue node.\n"); @@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node m_node->buf_id = qbuf->buf_id; INIT_LIST_HEAD(&m_node->list); - DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id); + DRM_DEBUG_KMS("m_node[%pK]ops_id[%d]\n", m_node, qbuf->ops_id); DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); for_each_ipp_planar(i) { @@ -659,7 +659,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node, mutex_lock(&c_node->event_lock); list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { - DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e); + DRM_DEBUG_KMS("count[%d]e[%pK]\n", count++, e); /* * qbuf == NULL condition means all event deletion. 
@@ -750,7 +750,7 @@ static struct drm_exynos_ipp_mem_node /* find memory node from memory list */ list_for_each_entry(m_node, head, list) { - DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node); + DRM_DEBUG_KMS("count[%d]m_node[%pK]\n", count++, m_node); /* compare buffer id */ if (m_node->buf_id == qbuf->buf_id) @@ -767,7 +767,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv, struct exynos_drm_ipp_ops *ops = NULL; int ret = 0; - DRM_DEBUG_KMS("node[%p]\n", m_node); + DRM_DEBUG_KMS("node[%pK]\n", m_node); if (!m_node) { DRM_ERROR("invalid queue node.\n"); @@ -1232,7 +1232,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv, m_node = list_first_entry(head, struct drm_exynos_ipp_mem_node, list); - DRM_DEBUG_KMS("m_node[%p]\n", m_node); + DRM_DEBUG_KMS("m_node[%pK]\n", m_node); ret = ipp_set_mem_node(ippdrv, c_node, m_node); if (ret) { @@ -1601,7 +1601,7 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev) } ippdrv->prop_list.ipp_id = ret; - DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n", + DRM_DEBUG_KMS("count[%d]ippdrv[%pK]ipp_id[%d]\n", count++, ippdrv, ret); /* store parent device for node */ @@ -1659,7 +1659,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev, file_priv->ipp_dev = dev; - DRM_DEBUG_KMS("done priv[%p]\n", dev); + DRM_DEBUG_KMS("done priv[%pK]\n", dev); return 0; } @@ -1676,7 +1676,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev, mutex_lock(&ippdrv->cmd_lock); list_for_each_entry_safe(c_node, tc_node, &ippdrv->cmd_list, list) { - DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", + DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", count++, ippdrv); if (c_node->filp == file) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 6591e406084c..79282a820ecc 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -748,7 +748,7 @@ static int rotator_probe(struct platform_device *pdev) goto err_ippdrv_register; } - DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv); + DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv); platform_set_drvdata(pdev, rot); -- cgit v1.2.3 From 22e098daae7e53763493b9d9976ef8c65190017e Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Wed, 15 Mar 2017 12:20:42 +0100 Subject: drm/exynos/dsi: make te-gpios optional DSI forwards te-gpios interrupts to the display controller, but if the display controller works in HW-TRIGGER mode this interrupt is not necessary. Making the te-gpios property optional allows these unnecessary interrupts to be avoided. In addition, if the panel device node of a command-mode panel does not provide the te-gpios property, the panel driver fails to probe, which was a critical issue. With this patch we not only get rid of 60 interrupt callbacks per second but also fix that critical issue.
Signed-off-by: Andrzej Hajda Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index c671f8e017db..d7ef26370e67 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1335,9 +1335,12 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi) int te_gpio_irq; dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0); + if (dsi->te_gpio == -ENOENT) + return 0; + if (!gpio_is_valid(dsi->te_gpio)) { - dev_err(dsi->dev, "no te-gpios specified\n"); ret = dsi->te_gpio; + dev_err(dsi->dev, "cannot get te-gpios, %d\n", ret); goto out; } -- cgit v1.2.3 From 49cc4c217c0dbb7d09c402472d1f85a81c093f9f Mon Sep 17 00:00:00 2001 From: Aaron Armstrong Skomra Date: Mon, 6 Mar 2017 10:54:57 -0800 Subject: HID: wacom: Correct Intuos Pro 2 resolution The features struct for the second gen Intuos Pro uses the wrong constant for the resolution. This fix is for commit 4922cd2. Fixes: 4922cd2 ("HID: wacom: Support 2nd-gen Intuos Pro's Bluetooth classic interface") Signed-off-by: Aaron Armstrong Skomra Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/wacom_wac.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 4aa3de9f1163..3d864466d473 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -4197,10 +4197,10 @@ static const struct wacom_features wacom_features_0x343 = WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; static const struct wacom_features wacom_features_0x360 = { "Wacom Intuos Pro M", 44800, 29600, 8191, 63, - INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 }; + INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 }; static const struct wacom_features wacom_features_0x361 = { "Wacom Intuos Pro L", 62200, 43200, 8191, 63, - INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 }; + INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 }; static const struct wacom_features wacom_features_HID_ANY_ID = { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID }; -- cgit v1.2.3 From b6b1f19b06b7d4dcc261a88d74c5fb0a53988b4e Mon Sep 17 00:00:00 2001 From: Aaron Armstrong Skomra Date: Mon, 6 Mar 2017 10:54:58 -0800 Subject: HID: wacom: don't manually release resources for the EKR Commit 5b779fc introduces the manual release of resources in wacom_remove() as an addition to the driver's use of devm. The EKR resources can only be released through wacom_remote_destroy_one() so we skip the manual release for it. 
Fixes: 5b779fc ("HID: wacom: release the resources before leaving despite devm") Signed-off-by: Aaron Armstrong Skomra Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/wacom_sys.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index be8f7e2a026f..994bddc55b82 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -2579,7 +2579,9 @@ static void wacom_remove(struct hid_device *hdev) /* make sure we don't trigger the LEDs */ wacom_led_groups_release(wacom); - wacom_release_resources(wacom); + + if (wacom->wacom_wac.features.type != REMOTE) + wacom_release_resources(wacom); hid_set_drvdata(hdev, NULL); } -- cgit v1.2.3 From deaba636997557fce46ca7bcb509bff5ea1b0558 Mon Sep 17 00:00:00 2001 From: Oscar Campos Date: Fri, 10 Feb 2017 18:23:00 +0000 Subject: HID: corsair: support for K65-K70 Rapidfire and Scimitar Pro RGB Add quirks for several corsair gaming devices to avoid long delays on report initialization Supported devices: - Corsair K65RGB Rapidfire Gaming Keyboard - Corsair K70RGB Rapidfire Gaming Keyboard - Corsair Scimitar Pro RGB Gaming Mouse Signed-off-by: Oscar Campos Signed-off-by: Jiri Kosina --- drivers/hid/hid-ids.h | 3 +++ drivers/hid/usbhid/hid-quirks.c | 3 +++ 2 files changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index b3df60da0297..0e2e7c571d22 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -278,6 +278,9 @@ #define USB_DEVICE_ID_CORSAIR_K70RGB 0x1b13 #define USB_DEVICE_ID_CORSAIR_STRAFE 0x1b15 #define USB_DEVICE_ID_CORSAIR_K65RGB 0x1b17 +#define USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE 0x1b38 +#define USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE 0x1b39 +#define USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB 0x1b3e #define USB_VENDOR_ID_CREATIVELABS 0x041e #define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index d6847a664446..a69a3c88ab29 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -80,6 +80,9 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, -- cgit v1.2.3 From 01adc47e885f1127b29d76d0dfb21d8262f9d6b4 Mon Sep 17 00:00:00 2001 From: Oscar Campos Date: Mon, 6 Mar 2017 21:02:39 +0000 Subject: HID: corsair: Add driver Scimitar Pro RGB gaming mouse 1b1c:1b3e support to hid-corsair This mouse sold by Corsair as Scimitar PRO RGB defines two consecutive Logical Minimum items in its Application (Consumer.0001) report making it non parseable. 
This patch fixes the report descriptor overriding byte 77 in rdesc from 0x16 (Logical Minimum with 16 bits value) to 0x26 (Logical Maximum with 16 bits value). Signed-off-by: Oscar Campos Signed-off-by: Jiri Kosina --- drivers/hid/Kconfig | 1 + drivers/hid/hid-core.c | 1 + drivers/hid/hid-corsair.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 49 insertions(+) (limited to 'drivers') diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 8eab3200ac9a..8c54cb8f5d6d 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -190,6 +190,7 @@ config HID_CORSAIR Supported devices: - Vengeance K90 + - Scimitar PRO RGB config HID_PRODIKEYS tristate "Prodikeys PC-MIDI Keyboard support" diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index ae01ae601d74..3ceb4a2af381 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1870,6 +1870,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c index c0303f61c26a..9ba5d98a1180 100644 --- a/drivers/hid/hid-corsair.c +++ b/drivers/hid/hid-corsair.c @@ -3,8 +3,10 @@ * * Supported devices: * - Vengeance K90 Keyboard + * - Scimitar PRO RGB Gaming Mouse * * Copyright (c) 2015 Clement Vuchener + * Copyright (c) 2017 Oscar Campos */ /* @@ -670,10 +672,51 @@ static int corsair_input_mapping(struct hid_device *dev, return 0; } +/* + * The report descriptor of Corsair Scimitar RGB Pro gaming mouse is + * non parseable as they define two consecutive Logical Minimum for + * the Usage Page (Consumer) in rdescs bytes 75 and 77 being 77 0x16 + * that should be obviousy 0x26 for Logical Magimum of 16 bits. This + * prevents poper parsing of the report descriptor due Logical + * Minimum being larger than Logical Maximum. + * + * This driver fixes the report descriptor for: + * - USB ID b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse + */ + +static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) +{ + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + + if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { + /* + * Corsair Scimitar RGB Pro report descriptor is broken and + * defines two different Logical Minimum for the Consumer + * Application. 
The byte 77 should be a 0x26 defining a 16 + * bits integer for the Logical Maximum but it is a 0x16 + * instead (Logical Minimum) + */ + switch (hdev->product) { + case USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB: + if (*rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16 + && rdesc[78] == 0xff && rdesc[79] == 0x0f) { + hid_info(hdev, "Fixing up report descriptor\n"); + rdesc[77] = 0x26; + } + break; + } + + } + return rdesc; +} + static const struct hid_device_id corsair_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90), .driver_data = CORSAIR_USE_K90_MACRO | CORSAIR_USE_K90_BACKLIGHT }, + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, + USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, {} }; @@ -686,10 +729,14 @@ static struct hid_driver corsair_driver = { .event = corsair_event, .remove = corsair_remove, .input_mapping = corsair_input_mapping, + .report_fixup = corsair_mouse_report_fixup, }; module_hid_driver(corsair_driver); MODULE_LICENSE("GPL"); +/* Original K90 driver author */ MODULE_AUTHOR("Clement Vuchener"); +/* Scimitar PRO RGB driver author */ +MODULE_AUTHOR("Oscar Campos"); MODULE_DESCRIPTION("HID driver for Corsair devices"); -- cgit v1.2.3 From 6e5364f5f472d4ea66d37459e299071b0190362c Mon Sep 17 00:00:00 2001 From: Ping Cheng Date: Tue, 14 Mar 2017 17:08:16 -0700 Subject: HID: wacom: generic: Wacom mouse is only provided for opaque tablets Commit f85c9dc ("Support tool ID and additional tool types") introduced mouse and lens cursor tools to generic codepath, which covers both display (direct) and opaque tablets (indirect devices). However, mouse and lens cursor tools are only provided for opaque tablets. This patch ignores mouse and lens cursor tools if the device is a display tablet. Signed-off-by: Ping Cheng Reviewed-by: Jason Gerecke Signed-off-by: Jiri Kosina --- drivers/hid/wacom_wac.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 3d864466d473..94250c293be2 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -1959,8 +1959,10 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev, input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH); input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL); input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH); - input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE); - input_set_capability(input, EV_KEY, BTN_TOOL_LENS); + if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) { + input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE); + input_set_capability(input, EV_KEY, BTN_TOOL_LENS); + } break; case WACOM_HID_WD_FINGERWHEEL: wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0); -- cgit v1.2.3 From 3d3d18f086cdda72ee18a454db70ca72c6e3246c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 21 Mar 2017 14:45:31 +0000 Subject: drm/i915: Avoid rcu_barrier() from reclaim paths (shrinker) The rcu_barrier() takes the cpu_hotplug mutex which itself is not reclaim-safe, and so rcu_barrier() is illegal from inside the shrinker. 
[ 309.661373] ========================================================= [ 309.661376] [ INFO: possible irq lock inversion dependency detected ] [ 309.661380] 4.11.0-rc1-CI-CI_DRM_2333+ #1 Tainted: G W [ 309.661383] --------------------------------------------------------- [ 309.661386] gem_exec_gttfil/6435 just changed the state of lock: [ 309.661389] (rcu_preempt_state.barrier_mutex){+.+.-.}, at: [] _rcu_barrier+0x31/0x160 [ 309.661399] but this lock took another, RECLAIM_FS-unsafe lock in the past: [ 309.661402] (cpu_hotplug.lock){+.+.+.} [ 309.661404] and interrupts could create inverse lock ordering between them. [ 309.661410] other info that might help us debug this: [ 309.661414] Possible interrupt unsafe locking scenario: [ 309.661417] CPU0 CPU1 [ 309.661419] ---- ---- [ 309.661421] lock(cpu_hotplug.lock); [ 309.661425] local_irq_disable(); [ 309.661432] lock(rcu_preempt_state.barrier_mutex); [ 309.661441] lock(cpu_hotplug.lock); [ 309.661446] [ 309.661448] lock(rcu_preempt_state.barrier_mutex); [ 309.661453] *** DEADLOCK *** [ 309.661460] 4 locks held by gem_exec_gttfil/6435: [ 309.661464] #0: (sb_writers#10){.+.+.+}, at: [] vfs_write+0x17d/0x1f0 [ 309.661475] #1: (debugfs_srcu){......}, at: [] debugfs_use_file_start+0x41/0xa0 [ 309.661486] #2: (&attr->mutex){+.+.+.}, at: [] simple_attr_write+0x37/0xe0 [ 309.661495] #3: (&dev->struct_mutex){+.+.+.}, at: [] i915_drop_caches_set+0x3a/0x150 [i915] [ 309.661540] the shortest dependencies between 2nd lock and 1st lock: [ 309.661547] -> (cpu_hotplug.lock){+.+.+.} ops: 829 { [ 309.661553] HARDIRQ-ON-W at: [ 309.661560] __lock_acquire+0x5e5/0x1b50 [ 309.661565] lock_acquire+0xc9/0x220 [ 309.661572] __mutex_lock+0x6e/0x990 [ 309.661576] mutex_lock_nested+0x16/0x20 [ 309.661583] get_online_cpus+0x61/0x80 [ 309.661590] kmem_cache_create+0x25/0x1d0 [ 309.661596] debug_objects_mem_init+0x30/0x249 [ 309.661602] start_kernel+0x341/0x3fe [ 309.661607] x86_64_start_reservations+0x2a/0x2c [ 309.661612] x86_64_start_kernel+0x173/0x186 [ 309.661619] verify_cpu+0x0/0xfc [ 309.661622] SOFTIRQ-ON-W at: [ 309.661627] __lock_acquire+0x611/0x1b50 [ 309.661632] lock_acquire+0xc9/0x220 [ 309.661636] __mutex_lock+0x6e/0x990 [ 309.661641] mutex_lock_nested+0x16/0x20 [ 309.661646] get_online_cpus+0x61/0x80 [ 309.661650] kmem_cache_create+0x25/0x1d0 [ 309.661655] debug_objects_mem_init+0x30/0x249 [ 309.661660] start_kernel+0x341/0x3fe [ 309.661664] x86_64_start_reservations+0x2a/0x2c [ 309.661669] x86_64_start_kernel+0x173/0x186 [ 309.661674] verify_cpu+0x0/0xfc [ 309.661677] RECLAIM_FS-ON-W at: [ 309.661682] mark_held_locks+0x6f/0xa0 [ 309.661687] lockdep_trace_alloc+0xb3/0x100 [ 309.661693] kmem_cache_alloc_trace+0x31/0x2e0 [ 309.661699] __smpboot_create_thread.part.1+0x27/0xe0 [ 309.661704] smpboot_create_threads+0x61/0x90 [ 309.661709] cpuhp_invoke_callback+0x9c/0x8a0 [ 309.661713] cpuhp_up_callbacks+0x31/0xb0 [ 309.661718] _cpu_up+0x7a/0xc0 [ 309.661723] do_cpu_up+0x5f/0x80 [ 309.661727] cpu_up+0xe/0x10 [ 309.661734] smp_init+0x71/0xb3 [ 309.661738] kernel_init_freeable+0x94/0x19e [ 309.661743] kernel_init+0x9/0xf0 [ 309.661748] ret_from_fork+0x2e/0x40 [ 309.661752] INITIAL USE at: [ 309.661757] __lock_acquire+0x234/0x1b50 [ 309.661761] lock_acquire+0xc9/0x220 [ 309.661766] __mutex_lock+0x6e/0x990 [ 309.661771] mutex_lock_nested+0x16/0x20 [ 309.661775] get_online_cpus+0x61/0x80 [ 309.661780] __cpuhp_setup_state+0x44/0x170 [ 309.661785] page_alloc_init+0x23/0x3a [ 309.661790] start_kernel+0x124/0x3fe [ 309.661794] x86_64_start_reservations+0x2a/0x2c [ 
309.661799] x86_64_start_kernel+0x173/0x186 [ 309.661804] verify_cpu+0x0/0xfc [ 309.661807] } [ 309.661813] ... key at: [] cpu_hotplug+0xb0/0x100 [ 309.661817] ... acquired at: [ 309.661821] lock_acquire+0xc9/0x220 [ 309.661825] __mutex_lock+0x6e/0x990 [ 309.661829] mutex_lock_nested+0x16/0x20 [ 309.661833] get_online_cpus+0x61/0x80 [ 309.661837] _rcu_barrier+0x9f/0x160 [ 309.661841] rcu_barrier+0x10/0x20 [ 309.661847] netdev_run_todo+0x5f/0x310 [ 309.661852] rtnl_unlock+0x9/0x10 [ 309.661856] default_device_exit_batch+0x133/0x150 [ 309.661862] ops_exit_list.isra.0+0x4d/0x60 [ 309.661866] cleanup_net+0x1d8/0x2c0 [ 309.661872] process_one_work+0x1f4/0x6d0 [ 309.661876] worker_thread+0x49/0x4a0 [ 309.661881] kthread+0x107/0x140 [ 309.661884] ret_from_fork+0x2e/0x40 [ 309.661890] -> (rcu_preempt_state.barrier_mutex){+.+.-.} ops: 179 { [ 309.661896] HARDIRQ-ON-W at: [ 309.661901] __lock_acquire+0x5e5/0x1b50 [ 309.661905] lock_acquire+0xc9/0x220 [ 309.661910] __mutex_lock+0x6e/0x990 [ 309.661914] mutex_lock_nested+0x16/0x20 [ 309.661919] _rcu_barrier+0x31/0x160 [ 309.661923] rcu_barrier+0x10/0x20 [ 309.661928] netdev_run_todo+0x5f/0x310 [ 309.661932] rtnl_unlock+0x9/0x10 [ 309.661936] default_device_exit_batch+0x133/0x150 [ 309.661941] ops_exit_list.isra.0+0x4d/0x60 [ 309.661946] cleanup_net+0x1d8/0x2c0 [ 309.661951] process_one_work+0x1f4/0x6d0 [ 309.661955] worker_thread+0x49/0x4a0 [ 309.661960] kthread+0x107/0x140 [ 309.661964] ret_from_fork+0x2e/0x40 [ 309.661968] SOFTIRQ-ON-W at: [ 309.661972] __lock_acquire+0x611/0x1b50 [ 309.661977] lock_acquire+0xc9/0x220 [ 309.661981] __mutex_lock+0x6e/0x990 [ 309.661986] mutex_lock_nested+0x16/0x20 [ 309.661990] _rcu_barrier+0x31/0x160 [ 309.661995] rcu_barrier+0x10/0x20 [ 309.661999] netdev_run_todo+0x5f/0x310 [ 309.662003] rtnl_unlock+0x9/0x10 [ 309.662008] default_device_exit_batch+0x133/0x150 [ 309.662013] ops_exit_list.isra.0+0x4d/0x60 [ 309.662017] cleanup_net+0x1d8/0x2c0 [ 309.662022] process_one_work+0x1f4/0x6d0 [ 309.662027] worker_thread+0x49/0x4a0 [ 309.662031] kthread+0x107/0x140 [ 309.662035] ret_from_fork+0x2e/0x40 [ 309.662039] IN-RECLAIM_FS-W at: [ 309.662043] __lock_acquire+0x638/0x1b50 [ 309.662048] lock_acquire+0xc9/0x220 [ 309.662053] __mutex_lock+0x6e/0x990 [ 309.662058] mutex_lock_nested+0x16/0x20 [ 309.662062] _rcu_barrier+0x31/0x160 [ 309.662067] rcu_barrier+0x10/0x20 [ 309.662089] i915_gem_shrink_all+0x33/0x40 [i915] [ 309.662109] i915_drop_caches_set+0x141/0x150 [i915] [ 309.662114] simple_attr_write+0xc7/0xe0 [ 309.662119] full_proxy_write+0x4f/0x70 [ 309.662124] __vfs_write+0x23/0x120 [ 309.662128] vfs_write+0xc6/0x1f0 [ 309.662133] SyS_write+0x44/0xb0 [ 309.662138] entry_SYSCALL_64_fastpath+0x1c/0xb1 [ 309.662142] INITIAL USE at: [ 309.662147] __lock_acquire+0x234/0x1b50 [ 309.662151] lock_acquire+0xc9/0x220 [ 309.662156] __mutex_lock+0x6e/0x990 [ 309.662160] mutex_lock_nested+0x16/0x20 [ 309.662165] _rcu_barrier+0x31/0x160 [ 309.662169] rcu_barrier+0x10/0x20 [ 309.662174] netdev_run_todo+0x5f/0x310 [ 309.662178] rtnl_unlock+0x9/0x10 [ 309.662183] default_device_exit_batch+0x133/0x150 [ 309.662188] ops_exit_list.isra.0+0x4d/0x60 [ 309.662192] cleanup_net+0x1d8/0x2c0 [ 309.662197] process_one_work+0x1f4/0x6d0 [ 309.662202] worker_thread+0x49/0x4a0 [ 309.662206] kthread+0x107/0x140 [ 309.662210] ret_from_fork+0x2e/0x40 [ 309.662214] } [ 309.662220] ... key at: [] rcu_preempt_state+0x508/0x780 [ 309.662225] ... 
acquired at: [ 309.662229] check_usage_forwards+0x12b/0x130 [ 309.662233] mark_lock+0x360/0x6f0 [ 309.662237] __lock_acquire+0x638/0x1b50 [ 309.662241] lock_acquire+0xc9/0x220 [ 309.662245] __mutex_lock+0x6e/0x990 [ 309.662249] mutex_lock_nested+0x16/0x20 [ 309.662253] _rcu_barrier+0x31/0x160 [ 309.662257] rcu_barrier+0x10/0x20 [ 309.662279] i915_gem_shrink_all+0x33/0x40 [i915] [ 309.662298] i915_drop_caches_set+0x141/0x150 [i915] [ 309.662303] simple_attr_write+0xc7/0xe0 [ 309.662307] full_proxy_write+0x4f/0x70 [ 309.662311] __vfs_write+0x23/0x120 [ 309.662315] vfs_write+0xc6/0x1f0 [ 309.662319] SyS_write+0x44/0xb0 [ 309.662323] entry_SYSCALL_64_fastpath+0x1c/0xb1 [ 309.662329] stack backtrace: [ 309.662335] CPU: 1 PID: 6435 Comm: gem_exec_gttfil Tainted: G W 4.11.0-rc1-CI-CI_DRM_2333+ #1 [ 309.662342] Hardware name: Hewlett-Packard HP Compaq 8100 Elite SFF PC/304Ah, BIOS 786H1 v01.13 07/14/2011 [ 309.662348] Call Trace: [ 309.662354] dump_stack+0x67/0x92 [ 309.662359] print_irq_inversion_bug.part.19+0x1a4/0x1b0 [ 309.662365] check_usage_forwards+0x12b/0x130 [ 309.662369] mark_lock+0x360/0x6f0 [ 309.662374] ? print_shortest_lock_dependencies+0x1a0/0x1a0 [ 309.662379] __lock_acquire+0x638/0x1b50 [ 309.662383] ? __mutex_unlock_slowpath+0x3e/0x2e0 [ 309.662388] ? trace_hardirqs_on+0xd/0x10 [ 309.662392] ? _rcu_barrier+0x31/0x160 [ 309.662396] lock_acquire+0xc9/0x220 [ 309.662400] ? _rcu_barrier+0x31/0x160 [ 309.662404] ? _rcu_barrier+0x31/0x160 [ 309.662409] __mutex_lock+0x6e/0x990 [ 309.662412] ? _rcu_barrier+0x31/0x160 [ 309.662416] ? _rcu_barrier+0x31/0x160 [ 309.662421] ? synchronize_rcu_expedited+0x35/0xb0 [ 309.662426] ? _raw_spin_unlock_irqrestore+0x52/0x60 [ 309.662434] mutex_lock_nested+0x16/0x20 [ 309.662438] _rcu_barrier+0x31/0x160 [ 309.662442] rcu_barrier+0x10/0x20 [ 309.662464] i915_gem_shrink_all+0x33/0x40 [i915] [ 309.662484] i915_drop_caches_set+0x141/0x150 [i915] [ 309.662489] simple_attr_write+0xc7/0xe0 [ 309.662494] full_proxy_write+0x4f/0x70 [ 309.662498] __vfs_write+0x23/0x120 [ 309.662503] ? rcu_read_lock_sched_held+0x75/0x80 [ 309.662507] ? rcu_sync_lockdep_assert+0x2a/0x50 [ 309.662512] ? __sb_start_write+0x102/0x210 [ 309.662516] ? vfs_write+0x17d/0x1f0 [ 309.662520] vfs_write+0xc6/0x1f0 [ 309.662524] ? trace_hardirqs_on_caller+0xe7/0x200 [ 309.662529] SyS_write+0x44/0xb0 [ 309.662533] entry_SYSCALL_64_fastpath+0x1c/0xb1 [ 309.662537] RIP: 0033:0x7f507eac24a0 [ 309.662541] RSP: 002b:00007fffda8720e8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 309.662548] RAX: ffffffffffffffda RBX: ffffffff81482bd3 RCX: 00007f507eac24a0 [ 309.662552] RDX: 0000000000000005 RSI: 00007fffda8720f0 RDI: 0000000000000005 [ 309.662557] RBP: ffffc9000048bf88 R08: 0000000000000000 R09: 000000000000002c [ 309.662561] R10: 0000000000000014 R11: 0000000000000246 R12: 00007fffda872230 [ 309.662566] R13: 00007fffda872228 R14: 0000000000000201 R15: 00007fffda8720f0 [ 309.662572] ? 
__this_cpu_preempt_check+0x13/0x20 Fixes: 0eafec6d3244 ("drm/i915: Enable lockless lookup of request tracking via RCU") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=100192 Signed-off-by: Chris Wilson Cc: Daniel Vetter Cc: # v4.9+ Link: http://patchwork.freedesktop.org/patch/msgid/20170314115019.18127-1-chris@chris-wilson.co.uk Reviewed-by: Daniel Vetter (cherry picked from commit bd784b7cc41af7a19cfb705fa6d800e511c4ab02) Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20170321144531.12344-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_shrinker.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 401006b4c6a3..d5d2b4c6ed38 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -263,7 +263,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_ACTIVE); - rcu_barrier(); /* wait until our RCU delayed slab frees are completed */ + synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */ return freed; } -- cgit v1.2.3 From 590379aef2e3fb7d00a093ed556c0a2714f86916 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Tue, 21 Mar 2017 14:47:20 +0000 Subject: drm/i915: make context status notifier head be per engine GVTg has introduced the context status notifier to schedule the GVTg workload. Until now the notifier was bound to the GVTg context only, so GVTg was not aware of host workloads. We are going to improve GVTg's guest workload scheduler policy and add GuC emulation support for new Gen graphics; both features require acknowledgment of all contexts running on hardware (but will not alter host workloads). So make the following change: 1. Move the context status notifier head from i915_gem_context to intel_engine_cs, which means there is one notifier head per engine instead of per context. The execlist driver still calls the notifier for each context sched-in/out event on the current engine. 2. On the GVTg side, bind a notifier_block for each physical engine during GVTg initialization, so that GVTg can see all context status events. In this patch GVTg does nothing for host context events, but a handler will be added later. In any case, the notifier callback is a no-op if there is no active vGPU. Since intel_gvt_init() is called at an early initialization stage and requires the status notifier head to be initialized, initialize it in intel_engine_setup(). v2: remove a redundant newline.
(chris) Fixes: 3c7ba6359d70 ("drm/i915: Introduce execlist context status change notification") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=100232 Signed-off-by: Changbin Du Cc: Joonas Lahtinen Cc: Tvrtko Ursulin Cc: Zhi Wang Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170313024711.28591-1-changbin.du@intel.com Acked-by: Zhenyu Wang Signed-off-by: Chris Wilson (cherry picked from commit 3fc03069bc6e6c316f19bb526e3c8ce784677477) Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20170321144720.17020-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gvt/gvt.h | 2 +- drivers/gpu/drm/i915/gvt/scheduler.c | 45 ++++++++++++++------------------- drivers/gpu/drm/i915/i915_gem_context.c | 1 - drivers/gpu/drm/i915/i915_gem_context.h | 3 --- drivers/gpu/drm/i915/intel_engine_cs.c | 2 ++ drivers/gpu/drm/i915/intel_lrc.c | 3 ++- drivers/gpu/drm/i915/intel_ringbuffer.h | 3 +++ 7 files changed, 27 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 23791920ced1..6dfc48b63b71 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -162,7 +162,6 @@ struct intel_vgpu { atomic_t running_workload_num; DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); struct i915_gem_context *shadow_ctx; - struct notifier_block shadow_ctx_notifier_block; #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) struct { @@ -233,6 +232,7 @@ struct intel_gvt { struct intel_gvt_gtt gtt; struct intel_gvt_opregion opregion; struct intel_gvt_workload_scheduler scheduler; + struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES]; DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS); struct intel_vgpu_type *types; unsigned int num_types; diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 39a83eb7aecc..c4353ed86d4b 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -130,12 +130,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) static int shadow_context_status_change(struct notifier_block *nb, unsigned long action, void *data) { - struct intel_vgpu *vgpu = container_of(nb, - struct intel_vgpu, shadow_ctx_notifier_block); - struct drm_i915_gem_request *req = - (struct drm_i915_gem_request *)data; - struct intel_gvt_workload_scheduler *scheduler = - &vgpu->gvt->scheduler; + struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data; + struct intel_gvt *gvt = container_of(nb, struct intel_gvt, + shadow_ctx_notifier_block[req->engine->id]); + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload = scheduler->current_workload[req->engine->id]; @@ -534,15 +532,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu) void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - int i; + struct intel_engine_cs *engine; + enum intel_engine_id i; gvt_dbg_core("clean workload scheduler\n"); - for (i = 0; i < I915_NUM_ENGINES; i++) { - if (scheduler->thread[i]) { - kthread_stop(scheduler->thread[i]); - scheduler->thread[i] = NULL; - } + for_each_engine(engine, gvt->dev_priv, i) { + atomic_notifier_chain_unregister( + &engine->context_status_notifier, + &gvt->shadow_ctx_notifier_block[i]); + kthread_stop(scheduler->thread[i]); } } @@ -550,18 +549,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) { struct 
intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct workload_thread_param *param = NULL; + struct intel_engine_cs *engine; + enum intel_engine_id i; int ret; - int i; gvt_dbg_core("init workload scheduler\n"); init_waitqueue_head(&scheduler->workload_complete_wq); - for (i = 0; i < I915_NUM_ENGINES; i++) { - /* check ring mask at init time */ - if (!HAS_ENGINE(gvt->dev_priv, i)) - continue; - + for_each_engine(engine, gvt->dev_priv, i) { init_waitqueue_head(&scheduler->waitq[i]); param = kzalloc(sizeof(*param), GFP_KERNEL); @@ -580,6 +576,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) ret = PTR_ERR(scheduler->thread[i]); goto err; } + + gvt->shadow_ctx_notifier_block[i].notifier_call = + shadow_context_status_change; + atomic_notifier_chain_register(&engine->context_status_notifier, + &gvt->shadow_ctx_notifier_block[i]); } return 0; err: @@ -591,9 +592,6 @@ err: void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu) { - atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier, - &vgpu->shadow_ctx_notifier_block); - i915_gem_context_put_unlocked(vgpu->shadow_ctx); } @@ -608,10 +606,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu) vgpu->shadow_ctx->engine[RCS].initialised = true; - vgpu->shadow_ctx_notifier_block.notifier_call = - shadow_context_status_change; - - atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier, - &vgpu->shadow_ctx_notifier_block); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 17f90c618208..e2d83b6d376b 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -311,7 +311,6 @@ __create_hw_context(struct drm_i915_private *dev_priv, ctx->ring_size = 4 * PAGE_SIZE; ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) << GEN8_CTX_ADDRESSING_MODE_SHIFT; - ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier); /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not * present or not in use we still need a small bias as ring wraparound diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index 0ac750b90f3d..e9c008fe14b1 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -160,9 +160,6 @@ struct i915_gem_context { /** desc_template: invariant fields for the HW context descriptor */ u32 desc_template; - /** status_notifier: list of callbacks for context-switch changes */ - struct atomic_notifier_head status_notifier; - /** guilty_count: How many times this context has caused a GPU hang. 
*/ unsigned int guilty_count; /** diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 371acf109e34..ab1be5c80ea5 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv, /* Nothing to do here, execute in order of dependencies */ engine->schedule = NULL; + ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); + dev_priv->engine[id] = engine; return 0; } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index ebf8023d21e6..471af3b480ad 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -345,7 +345,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq, if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) return; - atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq); + atomic_notifier_call_chain(&rq->engine->context_status_notifier, + status, rq); } static void diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 79c2b8d72322..13dccb18cd43 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -403,6 +403,9 @@ struct intel_engine_cs { */ struct i915_gem_context *legacy_active_context; + /* status_notifier: list of callbacks for context-switch changes */ + struct atomic_notifier_head context_status_notifier; + struct intel_engine_hangcheck hangcheck; bool needs_cmd_parser; -- cgit v1.2.3 From c248c64387fac5a6b31b343d9acb78f478e8619c Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 9 Mar 2017 13:26:07 +0200 Subject: nvme-rdma: handle cpu unplug when re-establishing the controller If a cpu unplug event has occured, we need to take the minimum of the provided nr_io_queues and the number of online cpus, otherwise we won't be able to connect them as blk-mq mapping won't dispatch to those queues. 
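To make the clamping concrete, here is the core of the change condensed from the diff below (ctrl and opts are the driver's own structures; the point is the min() against the online-CPU count and doing it before sizing the tag set):

	unsigned int nr_io_queues;
	int ret;

	/* never ask for more I/O queues than online CPUs, otherwise blk-mq
	 * ends up with queues that no CPU maps to and connecting them fails */
	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);	/* may lower it further */
	if (ret)
		return ret;

	ctrl->queue_count = nr_io_queues + 1;	/* +1 for the admin queue */

The nvme-loop patch that follows applies the same rule to the loopback transport.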
Reviewed-by: Christoph Hellwig Signed-off-by: Sagi Grimberg --- drivers/nvme/host/rdma.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 779f516e7a4e..47a479f26e5d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -343,8 +343,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl, struct ib_device *ibdev = dev->dev; int ret; - BUG_ON(queue_idx >= ctrl->queue_count); - ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); if (ret) @@ -652,8 +650,22 @@ out_free_queues: static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) { + struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; + unsigned int nr_io_queues; int i, ret; + nr_io_queues = min(opts->nr_io_queues, num_online_cpus()); + ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); + if (ret) + return ret; + + ctrl->queue_count = nr_io_queues + 1; + if (ctrl->queue_count < 2) + return 0; + + dev_info(ctrl->ctrl.device, + "creating %d I/O queues.\n", nr_io_queues); + for (i = 1; i < ctrl->queue_count; i++) { ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.opts->queue_size); @@ -1791,20 +1803,8 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl) { - struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; int ret; - ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); - if (ret) - return ret; - - ctrl->queue_count = opts->nr_io_queues + 1; - if (ctrl->queue_count < 2) - return 0; - - dev_info(ctrl->ctrl.device, - "creating %d I/O queues.\n", opts->nr_io_queues); - ret = nvme_rdma_init_io_queues(ctrl); if (ret) return ret; -- cgit v1.2.3 From 945dd5bacc8978439af276976b5dcbbd42333dbc Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 13 Mar 2017 13:27:51 +0200 Subject: nvme-loop: handle cpu unplug when re-establishing the controller If a cpu unplug event has occured, we need to take the minimum of the provided nr_io_queues and the number of online cpus, otherwise we won't be able to connect them as blk-mq mapping won't dispatch to those queues. 
Reviewed-by: Christoph Hellwig Signed-off-by: Sagi Grimberg --- drivers/nvme/target/loop.c | 88 ++++++++++++++++++++++++++-------------------- 1 file changed, 50 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 74e04a0855d4..22f7bc6bac7f 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx) static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl, struct nvme_loop_iod *iod, unsigned int queue_idx) { - BUG_ON(queue_idx >= ctrl->queue_count); - iod->req.cmd = &iod->cmd; iod->req.rsp = &iod->rsp; iod->queue = &ctrl->queues[queue_idx]; @@ -314,6 +312,43 @@ free_ctrl: kfree(ctrl); } +static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) +{ + int i; + + for (i = 1; i < ctrl->queue_count; i++) + nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); +} + +static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) +{ + struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; + unsigned int nr_io_queues; + int ret, i; + + nr_io_queues = min(opts->nr_io_queues, num_online_cpus()); + ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); + if (ret || !nr_io_queues) + return ret; + + dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); + + for (i = 1; i <= nr_io_queues; i++) { + ctrl->queues[i].ctrl = ctrl; + ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); + if (ret) + goto out_destroy_queues; + + ctrl->queue_count++; + } + + return 0; + +out_destroy_queues: + nvme_loop_destroy_io_queues(ctrl); + return ret; +} + static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) { int error; @@ -385,17 +420,13 @@ out_free_sq: static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) { - int i; - nvme_stop_keep_alive(&ctrl->ctrl); if (ctrl->queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request, &ctrl->ctrl); - - for (i = 1; i < ctrl->queue_count; i++) - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); + nvme_loop_destroy_io_queues(ctrl); } if (ctrl->ctrl.state == NVME_CTRL_LIVE) @@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) if (ret) goto out_disable; - for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { - ctrl->queues[i].ctrl = ctrl; - ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); - if (ret) - goto out_free_queues; - - ctrl->queue_count++; - } + ret = nvme_loop_init_io_queues(ctrl); + if (ret) + goto out_destroy_admin; - for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { + for (i = 1; i < ctrl->queue_count; i++) { ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) - goto out_free_queues; + goto out_destroy_io; } changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); @@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) return; -out_free_queues: - for (i = 1; i < ctrl->queue_count; i++) - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); +out_destroy_io: + nvme_loop_destroy_io_queues(ctrl); +out_destroy_admin: nvme_loop_destroy_admin_queue(ctrl); out_disable: dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); @@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = { static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) { - struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; int ret, i; - ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); - if (ret || !opts->nr_io_queues) + ret = 
nvme_loop_init_io_queues(ctrl); + if (ret) return ret; - dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", - opts->nr_io_queues); - - for (i = 1; i <= opts->nr_io_queues; i++) { - ctrl->queues[i].ctrl = ctrl; - ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); - if (ret) - goto out_destroy_queues; - - ctrl->queue_count++; - } - memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); ctrl->tag_set.ops = &nvme_loop_mq_ops; ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; @@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) goto out_free_tagset; } - for (i = 1; i <= opts->nr_io_queues; i++) { + for (i = 1; i < ctrl->queue_count; i++) { ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) goto out_cleanup_connect_q; @@ -588,8 +601,7 @@ out_cleanup_connect_q: out_free_tagset: blk_mq_free_tag_set(&ctrl->tag_set); out_destroy_queues: - for (i = 1; i < ctrl->queue_count; i++) - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); + nvme_loop_destroy_io_queues(ctrl); return ret; } -- cgit v1.2.3 From 65b1adebfe43c642dfe3b109edb5d992db5fbe72 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Tue, 21 Mar 2017 13:19:09 -0600 Subject: vfio: Rework group release notifier warning The intent of the original warning is make sure that the mdev vendor driver has removed any group notifiers at the point where the group is closed by the user. Theoretically this would be through an orderly shutdown where any devices are release prior to the group release. We can't always count on an orderly shutdown, the user can close the group before the notifier can be removed or the user task might be killed. We'd like to add this sanity test when the group is idle and the only references are from the devices within the group themselves, but we don't have a good way to do that. Instead check both when the group itself is removed and when the group is opened. A bit later than we'd prefer, but better than the current over aggressive approach. Fixes: ccd46dbae77d ("vfio: support notifier chain in vfio_group") Signed-off-by: Alex Williamson Cc: # v4.10 Cc: Jike Song --- drivers/vfio/vfio.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 609f4f982c74..561084ab387f 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -403,6 +403,7 @@ static void vfio_group_release(struct kref *kref) struct iommu_group *iommu_group = group->iommu_group; WARN_ON(!list_empty(&group->device_list)); + WARN_ON(group->notifier.head); list_for_each_entry_safe(unbound, tmp, &group->unbound_list, unbound_next) { @@ -1573,6 +1574,10 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep) return -EBUSY; } + /* Warn if previous user didn't cleanup and re-init to drop them */ + if (WARN_ON(group->notifier.head)) + BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); + filep->private_data = group; return 0; @@ -1584,9 +1589,6 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep) filep->private_data = NULL; - /* Any user didn't unregister? */ - WARN_ON(group->notifier.head); - vfio_group_try_dissolve_container(group); atomic_dec(&group->opened); -- cgit v1.2.3 From 64897b20eed29cee2b998fb5ba362e65523dba3c Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Tue, 21 Mar 2017 22:19:07 +0100 Subject: cpufreq: intel_pstate: Fix policy data management in passive mode The policy->cpuinfo.max_freq and policy->max updates in intel_cpufreq_turbo_update() are excessive as they are done for no good reason and may lead to problems in principle, so they should be dropped. However, after dropping them intel_cpufreq_turbo_update() becomes almost entirely pointless, because the check made by it is made again down the road in intel_pstate_prepare_request(). The only thing in it that still needs to be done is the call to update_turbo_state(), so drop intel_cpufreq_turbo_update() altogether and make its callers invoke update_turbo_state() directly instead of it. In addition to that, fix intel_cpufreq_verify_policy() so that it checks global.no_turbo in addition to global.turbo_disabled when updating policy->cpuinfo.max_freq to make it consistent with intel_pstate_verify_policy(). Fixes: 001c76f05b01 (cpufreq: intel_pstate: Generic governors support) Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 29 ++++++----------------------- 1 file changed, 6 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 7b07803e7042..283491f742d3 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2257,7 +2257,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) struct cpudata *cpu = all_cpu_data[policy->cpu]; update_turbo_state(); - policy->cpuinfo.max_freq = global.turbo_disabled ? + policy->cpuinfo.max_freq = global.no_turbo || global.turbo_disabled ? cpu->pstate.max_freq : cpu->pstate.turbo_freq; cpufreq_verify_within_cpu_limits(policy); @@ -2265,26 +2265,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) return 0; } -static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu, - struct cpufreq_policy *policy, - unsigned int target_freq) -{ - unsigned int max_freq; - - update_turbo_state(); - - max_freq = global.no_turbo || global.turbo_disabled ? - cpu->pstate.max_freq : cpu->pstate.turbo_freq; - policy->cpuinfo.max_freq = max_freq; - if (policy->max > max_freq) - policy->max = max_freq; - - if (target_freq > max_freq) - target_freq = max_freq; - - return target_freq; -} - static int intel_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) @@ -2293,8 +2273,10 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; int target_pstate; + update_turbo_state(); + freqs.old = policy->cur; - freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq); + freqs.new = target_freq; cpufreq_freq_transition_begin(policy, &freqs); switch (relation) { @@ -2326,7 +2308,8 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, struct cpudata *cpu = all_cpu_data[policy->cpu]; int target_pstate; - target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq); + update_turbo_state(); + target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); target_pstate = intel_pstate_prepare_request(cpu, target_pstate); intel_pstate_update_pstate(cpu, target_pstate); -- cgit v1.2.3 From ad0a45fd9c14feebd000b6e84189d0edff265170 Mon Sep 17 00:00:00 2001 From: Vaidyanathan Srinivasan Date: Sun, 19 Mar 2017 00:51:59 +0530 Subject: cpuidle: Validate cpu_dev in cpuidle_add_sysfs() If a given cpu is not in cpu_present and cpu hotplug is disabled, arch can skip setting up the cpu_dev. 
Arch cpuidle driver should pass correct cpu mask for registration, but failing to do so by the driver causes error to propagate and crash like this: [ 30.076045] Unable to handle kernel paging request for data at address 0x00000048 [ 30.076100] Faulting instruction address: 0xc0000000007b2f30 cpu 0x4d: Vector: 300 (Data Access) at [c000003feb18b670] pc: c0000000007b2f30: kobject_get+0x20/0x70 lr: c0000000007b3c94: kobject_add_internal+0x54/0x3f0 sp: c000003feb18b8f0 msr: 9000000000009033 dar: 48 dsisr: 40000000 current = 0xc000003fd2ed8300 paca = 0xc00000000fbab500 softe: 0 irq_happened: 0x01 pid = 1, comm = swapper/0 Linux version 4.11.0-rc2-svaidy+ (sv@sagarika) (gcc version 6.2.0 20161005 (Ubuntu 6.2.0-5ubuntu12) ) #10 SMP Sun Mar 19 00:08:09 IST 2017 enter ? for help [c000003feb18b960] c0000000007b3c94 kobject_add_internal+0x54/0x3f0 [c000003feb18b9f0] c0000000007b43a4 kobject_init_and_add+0x64/0xa0 [c000003feb18ba70] c000000000e284f4 cpuidle_add_sysfs+0xb4/0x130 [c000003feb18baf0] c000000000e26038 cpuidle_register_device+0x118/0x1c0 [c000003feb18bb30] c000000000e26c48 cpuidle_register+0x78/0x120 [c000003feb18bbc0] c00000000168fd9c powernv_processor_idle_init+0x110/0x1c4 [c000003feb18bc40] c00000000000cff8 do_one_initcall+0x68/0x1d0 [c000003feb18bd00] c0000000016242f4 kernel_init_freeable+0x280/0x360 [c000003feb18bdc0] c00000000000d864 kernel_init+0x24/0x160 [c000003feb18be30] c00000000000b4e8 ret_from_kernel_thread+0x5c/0x74 Validating cpu_dev fixes the crash and reports correct error message like: [ 30.163506] Failed to register cpuidle device for cpu136 [ 30.173329] Registration of powernv driver failed. Signed-off-by: Vaidyanathan Srinivasan [ rjw: Comment massage ] Signed-off-by: Rafael J. Wysocki --- drivers/cpuidle/sysfs.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers') diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index c5adc8c9ac43..ae948b1da93a 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -615,6 +615,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); int error; + /* + * Return if cpu_device is not setup for this CPU. + * + * This could happen if the arch did not set up cpu_device + * since this CPU is not in cpu_present mask and the + * driver did not send a correct CPU mask during registration. + * Without this check we would end up passing bogus + * value for &cpu_dev->kobj in kobject_init_and_add() + */ + if (!cpu_dev) + return -ENODEV; + kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); if (!kdev) return -ENOMEM; -- cgit v1.2.3 From 98d068ab52b4b11d403995ed14154660797e7136 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Tue, 14 Mar 2017 14:15:20 +0800 Subject: r8152: fix the list rx_done may be used without initialization The list rx_done would be initialized when the linking on occurs. Therefore, if a napi is scheduled without any linking on before, the following kernel panic would happen. BUG: unable to handle kernel NULL pointer dereference at 000000000000008 IP: [] r8152_poll+0xe1e/0x1210 [r8152] PGD 0 Oops: 0002 [#1] SMP Signed-off-by: Hayes Wang Signed-off-by: David S. 
Miller --- drivers/net/usb/r8152.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 986243c932cc..bb3eedd07fbe 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1362,6 +1362,7 @@ static int alloc_all_mem(struct r8152 *tp) spin_lock_init(&tp->rx_lock); spin_lock_init(&tp->tx_lock); INIT_LIST_HEAD(&tp->tx_free); + INIT_LIST_HEAD(&tp->rx_done); skb_queue_head_init(&tp->tx_queue); skb_queue_head_init(&tp->rx_queue); -- cgit v1.2.3 From 16320f363ae128d9b9c70e60f00f2a572f57c23d Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Wed, 15 Mar 2017 09:32:15 +0800 Subject: vhost-vsock: add pkt cancel capability To allow canceling all packets of a connection. Reviewed-by: Stefan Hajnoczi Reviewed-by: Jorgen Hansen Signed-off-by: Peng Tao Signed-off-by: David S. Miller --- drivers/vhost/vsock.c | 41 +++++++++++++++++++++++++++++++++++++++++ include/net/af_vsock.h | 3 +++ 2 files changed, 44 insertions(+) (limited to 'drivers') diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index ce5e63d2c66a..44eed8eb0725 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -223,6 +223,46 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt) return len; } +static int +vhost_transport_cancel_pkt(struct vsock_sock *vsk) +{ + struct vhost_vsock *vsock; + struct virtio_vsock_pkt *pkt, *n; + int cnt = 0; + LIST_HEAD(freeme); + + /* Find the vhost_vsock according to guest context id */ + vsock = vhost_vsock_get(vsk->remote_addr.svm_cid); + if (!vsock) + return -ENODEV; + + spin_lock_bh(&vsock->send_pkt_list_lock); + list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { + if (pkt->vsk != vsk) + continue; + list_move(&pkt->list, &freeme); + } + spin_unlock_bh(&vsock->send_pkt_list_lock); + + list_for_each_entry_safe(pkt, n, &freeme, list) { + if (pkt->reply) + cnt++; + list_del(&pkt->list); + virtio_transport_free_pkt(pkt); + } + + if (cnt) { + struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; + int new_cnt; + + new_cnt = atomic_sub_return(cnt, &vsock->queued_replies); + if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num) + vhost_poll_queue(&tx_vq->poll); + } + + return 0; +} + static struct virtio_vsock_pkt * vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq, unsigned int out, unsigned int in) @@ -675,6 +715,7 @@ static struct virtio_transport vhost_transport = { .release = virtio_transport_release, .connect = virtio_transport_connect, .shutdown = virtio_transport_shutdown, + .cancel_pkt = vhost_transport_cancel_pkt, .dgram_enqueue = virtio_transport_dgram_enqueue, .dgram_dequeue = virtio_transport_dgram_dequeue, diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index f2758964ce6f..f32ed9ac181a 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -100,6 +100,9 @@ struct vsock_transport { void (*destruct)(struct vsock_sock *); void (*release)(struct vsock_sock *); + /* Cancel all pending packets sent on vsock. */ + int (*cancel_pkt)(struct vsock_sock *vsk); + /* Connections. */ int (*connect)(struct vsock_sock *); -- cgit v1.2.3 From 09050957fae896e001498af1aa35c446a11cb47d Mon Sep 17 00:00:00 2001 From: Yaroslav Isakov Date: Thu, 16 Mar 2017 22:44:10 +0300 Subject: tun: fix inability to set offloads after disabling them via ethtool Added missing logic in tun driver, which prevents apps to set offloads using tun ioctl, if offloads were previously disabled via ethtool Signed-off-by: Yaroslav Isakov Signed-off-by: David S. 
Miller --- drivers/net/tun.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 34cc3c590aa5..cc88cd7856f5 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1931,6 +1931,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) return -EINVAL; tun->set_features = features; + tun->dev->wanted_features &= ~TUN_USER_FEATURES; + tun->dev->wanted_features |= features; netdev_update_features(tun->dev); return 0; -- cgit v1.2.3 From 6bd845d1cf98b45c634baacb8381436dad3c2dd0 Mon Sep 17 00:00:00 2001 From: Bjørn Mork Date: Fri, 17 Mar 2017 17:20:48 +0100 Subject: qmi_wwan: add Dell DW5811e MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a Dell branded Sierra Wireless EM7455. It is operating in MBIM mode by default, but can be configured to provide two QMI/RMNET functions. Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller --- drivers/net/usb/qmi_wwan.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 805674550683..f8d55aa058ec 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -925,6 +925,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ -- cgit v1.2.3 From 13e2d5187f6b965ba3556caedb914baf81b98ed2 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 17 Mar 2017 23:52:35 +0300 Subject: bna: integer overflow bug in debugfs We could allocate less memory than intended because we do: bnad->regdata = kzalloc(len << 2, GFP_KERNEL); The shift can overflow leading to a crash. This is debugfs code so the impact is very small. Fixes: 7afc5dbde091 ("bna: Add debugfs interface.") Signed-off-by: Dan Carpenter Acked-by: Rasesh Mody Signed-off-by: David S. Miller --- drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 05c1c1dd7751..cebfe3bd086e 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -325,7 +325,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf, return PTR_ERR(kern_buf); rc = sscanf(kern_buf, "%x:%x", &addr, &len); - if (rc < 2) { + if (rc < 2 || len > UINT_MAX >> 2) { netdev_warn(bnad->netdev, "failed to read user buffer\n"); kfree(kern_buf); return -EINVAL; -- cgit v1.2.3 From 3dc857f0e8fc22610a59cbb346ba62c6e921863f Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 17 Mar 2017 16:07:11 -0700 Subject: net: vrf: Reset rt6i_idev in local dst after put The VRF driver takes a reference to the inet6_dev on the VRF device for its rt6_local dst when handling local traffic through the VRF device as a loopback. 
When the device is deleted the driver does a put on the idev but does not reset rt6i_idev in the rt6_info struct. When the dst is destroyed, dst_destroy calls ip6_dst_destroy which does a second put for what is essentially the same reference causing it to be prematurely freed. Reset rt6i_idev after the put in the vrf driver. Fixes: b4869aa2f881e ("net: vrf: ipv6 support for local traffic to local addresses") Signed-off-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/vrf.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index fea687f35b5a..d6988db1930d 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -462,8 +462,10 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) } if (rt6_local) { - if (rt6_local->rt6i_idev) + if (rt6_local->rt6i_idev) { in6_dev_put(rt6_local->rt6i_idev); + rt6_local->rt6i_idev = NULL; + } dst = &rt6_local->dst; dev_put(dst->dev); -- cgit v1.2.3 From ff010472fb75670cb5c08671e820eeea3af59c87 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 21 Mar 2017 11:36:06 +0530 Subject: cpufreq: Restore policy min/max limits on CPU online On CPU online the cpufreq core restores the previous governor (or the previous "policy" setting for ->setpolicy drivers), but it does not restore the min/max limits at the same time, which is confusing, inconsistent and real pain for users who set the limits and then suspend/resume the system (using full suspend), in which case the limits are reset on all CPUs except for the boot one. Fix this by making cpufreq_online() restore the limits when an inactive policy is brought online. The commit log and patch are inspired from Rafael's earlier work. Reported-by: Rafael J. Wysocki Signed-off-by: Viresh Kumar Cc: 4.3+ # 4.3+ Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index b8ff617d449d..5dbdd261aa73 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1184,6 +1184,9 @@ static int cpufreq_online(unsigned int cpu) for_each_cpu(j, policy->related_cpus) per_cpu(cpufreq_cpu_data, j) = policy; write_unlock_irqrestore(&cpufreq_driver_lock, flags); + } else { + policy->min = policy->user_policy.min; + policy->max = policy->user_policy.max; } if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { -- cgit v1.2.3 From 4071898bf0f4d79ff353db327af2a15123272548 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Sun, 19 Mar 2017 09:19:57 -0700 Subject: net: qmi_wwan: Add USB IDs for MDM6600 modem on Motorola Droid 4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This gets qmicli working with the MDM6600 modem. Cc: Bjørn Mork Reviewed-by: Sebastian Reichel Tested-by: Sebastian Reichel Signed-off-by: Tony Lindgren Acked-by: Bjørn Mork Signed-off-by: David S. 
Miller --- drivers/net/usb/qmi_wwan.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index f8d55aa058ec..156f7f85e486 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -580,6 +580,10 @@ static const struct usb_device_id products[] = { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69), .driver_info = (unsigned long)&qmi_wwan_info, }, + { /* Motorola Mapphone devices with MDM6600 */ + USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff), + .driver_info = (unsigned long)&qmi_wwan_info, + }, /* 2. Combined interface devices matching on class+protocol */ { /* Huawei E367 and possibly others in "Windows mode" */ -- cgit v1.2.3 From dd7406dd334a98ada3ff5371847a3eeb4ba16313 Mon Sep 17 00:00:00 2001 From: Alex Hemme Date: Tue, 7 Mar 2017 14:38:29 -0500 Subject: hwmon: (max31790) Set correct PWM value Traced fans not spinning to incorrect PWM value being written. The passed in value was written instead of the calulated value. Fixes: 54187ff9d766 ("hwmon: (max31790) Convert to use new hwmon registration API") Signed-off-by: Alex Hemme Signed-off-by: Guenter Roeck --- drivers/hwmon/max31790.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c index c1b9275978f9..281491cca510 100644 --- a/drivers/hwmon/max31790.c +++ b/drivers/hwmon/max31790.c @@ -311,7 +311,7 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel, data->pwm[channel] = val << 8; err = i2c_smbus_write_word_swapped(client, MAX31790_REG_PWMOUT(channel), - val); + data->pwm[channel]); break; case hwmon_pwm_enable: fan_config = data->fan_config[channel]; -- cgit v1.2.3 From 8358378b22518d92424597503d3c1cd302a490b6 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sun, 12 Mar 2017 06:18:58 -0700 Subject: hwmon: (it87) Avoid registering the same chip on both SIO addresses IT8705F is known to respond on both SIO addresses. Registering it twice may result in system lockups. Reported-by: Russell King Fixes: e84bd9535e2b ("hwmon: (it87) Add support for second Super-IO chip") Signed-off-by: Guenter Roeck --- drivers/hwmon/it87.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index efb01c247e2d..4dfc7238313e 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c @@ -3198,7 +3198,7 @@ static int __init sm_it87_init(void) { int sioaddr[2] = { REG_2E, REG_4E }; struct it87_sio_data sio_data; - unsigned short isa_address; + unsigned short isa_address[2]; bool found = false; int i, err; @@ -3208,15 +3208,29 @@ static int __init sm_it87_init(void) for (i = 0; i < ARRAY_SIZE(sioaddr); i++) { memset(&sio_data, 0, sizeof(struct it87_sio_data)); - isa_address = 0; - err = it87_find(sioaddr[i], &isa_address, &sio_data); - if (err || isa_address == 0) + isa_address[i] = 0; + err = it87_find(sioaddr[i], &isa_address[i], &sio_data); + if (err || isa_address[i] == 0) continue; + /* + * Don't register second chip if its ISA address matches + * the first chip's ISA address. + */ + if (i && isa_address[i] == isa_address[0]) + break; - err = it87_device_add(i, isa_address, &sio_data); + err = it87_device_add(i, isa_address[i], &sio_data); if (err) goto exit_dev_unregister; + found = true; + + /* + * IT8705F may respond on both SIO addresses. + * Stop probing after finding one. 
+ */ + if (sio_data.type == it87) + break; } if (!found) { -- cgit v1.2.3 From de288e36fe33f7e06fa272bc8e2f85aa386d99aa Mon Sep 17 00:00:00 2001 From: Janusz Dziedzic Date: Mon, 13 Mar 2017 14:11:32 +0200 Subject: usb: dwc3: gadget: delay unmap of bounced requests In the case of bounced ep0 requests, we must delay DMA operation until after ->complete() otherwise we might overwrite contents of req->buf. This caused problems with RNDIS gadget. Signed-off-by: Janusz Dziedzic Signed-off-by: Felipe Balbi --- drivers/usb/dwc3/gadget.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 0d75158e43fe..79e7a3480d51 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -171,6 +171,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, int status) { struct dwc3 *dwc = dep->dwc; + unsigned int unmap_after_complete = false; req->started = false; list_del(&req->list); @@ -180,11 +181,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, if (req->request.status == -EINPROGRESS) req->request.status = status; - if (dwc->ep0_bounced && dep->number <= 1) + /* + * NOTICE we don't want to unmap before calling ->complete() if we're + * dealing with a bounced ep0 request. If we unmap it here, we would end + * up overwritting the contents of req->buf and this could confuse the + * gadget driver. + */ + if (dwc->ep0_bounced && dep->number <= 1) { dwc->ep0_bounced = false; - - usb_gadget_unmap_request_by_dev(dwc->sysdev, - &req->request, req->direction); + unmap_after_complete = true; + } else { + usb_gadget_unmap_request_by_dev(dwc->sysdev, + &req->request, req->direction); + } trace_dwc3_gadget_giveback(req); @@ -192,6 +201,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, usb_gadget_giveback_request(&dep->endpoint, &req->request); spin_lock(&dwc->lock); + if (unmap_after_complete) + usb_gadget_unmap_request_by_dev(dwc->sysdev, + &req->request, req->direction); + if (dep->number > 1) pm_runtime_put(dwc->dev); } -- cgit v1.2.3 From 74098c4ac782afd71b35ac56f9536ae2f5cd7da7 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Tue, 14 Mar 2017 12:09:56 +0100 Subject: usb: gadget: acm: fix endianness in notifications MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The gadget code exports the bitfield for serial status changes over the wire in its internal endianness. The fix is to convert to little endian before sending it over the wire. 
Signed-off-by: Oliver Neukum Tested-by: 家瑋 CC: Signed-off-by: Felipe Balbi --- drivers/usb/gadget/function/f_acm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c index a30766ca4226..5e3828d9dac7 100644 --- a/drivers/usb/gadget/function/f_acm.c +++ b/drivers/usb/gadget/function/f_acm.c @@ -535,13 +535,15 @@ static int acm_notify_serial_state(struct f_acm *acm) { struct usb_composite_dev *cdev = acm->port.func.config->cdev; int status; + __le16 serial_state; spin_lock(&acm->lock); if (acm->notify_req) { dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n", acm->port_num, acm->serial_state); + serial_state = cpu_to_le16(acm->serial_state); status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, - 0, &acm->serial_state, sizeof(acm->serial_state)); + 0, &serial_state, sizeof(acm->serial_state)); } else { acm->pending = true; status = 0; -- cgit v1.2.3 From 09424c50b7dff40cb30011c09114404a4656e023 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Wed, 8 Mar 2017 16:05:43 +0200 Subject: usb: gadget: f_uvc: Fix SuperSpeed companion descriptor's wBytesPerInterval The streaming_maxburst module parameter is 0 offset (0..15) so we must add 1 while using it for wBytesPerInterval calculation for the SuperSpeed companion descriptor. Without this host uvcvideo driver will always see the wrong wBytesPerInterval for SuperSpeed uvc gadget and may not find a suitable video interface endpoint. e.g. for streaming_maxburst = 0 case it will always fail as wBytePerInterval was evaluating to 0. Cc: stable@vger.kernel.org Reviewed-by: Laurent Pinchart Signed-off-by: Roger Quadros Signed-off-by: Felipe Balbi --- drivers/usb/gadget/function/f_uvc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index 29b41b5dee04..c7689d05356c 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -625,7 +625,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f) uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst; uvc_ss_streaming_comp.wBytesPerInterval = cpu_to_le16(max_packet_size * max_packet_mult * - opts->streaming_maxburst); + (opts->streaming_maxburst + 1)); /* Allocate endpoints. */ ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep); -- cgit v1.2.3 From 16bb05d98c904a4f6c5ce7e2d992299f794acbf2 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Wed, 8 Mar 2017 16:05:44 +0200 Subject: usb: gadget: f_uvc: Sanity check wMaxPacketSize for SuperSpeed As per USB3.0 Specification "Table 9-20. Standard Endpoint Descriptor", for interrupt and isochronous endpoints, wMaxPacketSize must be set to 1024 if the endpoint defines bMaxBurst to be greater than zero. 
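As a small worked example of the two f_uvc constraints above (illustrative values only; the local variable names are not from the driver): streaming_maxburst is 0-based, and any non-zero burst requires wMaxPacketSize to be a multiple of 1024, so

	unsigned int maxpacket = 1024;	/* already 1024-aligned, as required when maxburst > 0 */
	unsigned int maxburst = 2;	/* 0-based: up to 3 packets per burst */
	unsigned int mult = 1;		/* packets-per-interval multiplier used by f_uvc */
	__le16 bytes_per_interval = cpu_to_le16(maxpacket * mult * (maxburst + 1));	/* 3072 */

Without the "+ 1" the same configuration yielded 2048 here, and 0 for streaming_maxburst = 0, which is what broke host-side endpoint selection.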
Reviewed-by: Laurent Pinchart Signed-off-by: Roger Quadros Signed-off-by: Felipe Balbi --- drivers/usb/gadget/function/f_uvc.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index c7689d05356c..f8a1881609a2 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -594,6 +594,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f) opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U); opts->streaming_maxburst = min(opts->streaming_maxburst, 15U); + /* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */ + if (opts->streaming_maxburst && + (opts->streaming_maxpacket % 1024) != 0) { + opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024); + INFO(cdev, "overriding streaming_maxpacket to %d\n", + opts->streaming_maxpacket); + } + /* Fill in the FS/HS/SS Video Streaming specific descriptors from the * module parameters. * -- cgit v1.2.3 From 1f459262b0e1649a1e5ad12fa4c66eb76c2220ce Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 10 Mar 2017 15:39:32 -0600 Subject: usb: gadget: udc: remove pointer dereference after free Remove pointer dereference after free. Addresses-Coverity-ID: 1091173 Acked-by: Michal Nazarewicz Signed-off-by: Gustavo A. R. Silva Signed-off-by: Felipe Balbi --- drivers/usb/gadget/udc/pch_udc.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c index a97da645c1b9..8a365aad66fe 100644 --- a/drivers/usb/gadget/udc/pch_udc.c +++ b/drivers/usb/gadget/udc/pch_udc.c @@ -1523,7 +1523,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev, td = phys_to_virt(addr); addr2 = (dma_addr_t)td->next; pci_pool_free(dev->data_requests, td, addr); - td->next = 0x00; addr = addr2; } req->chain_len = 1; -- cgit v1.2.3 From 25cd9721c2b16ee0d775e36ec3af31f392003f80 Mon Sep 17 00:00:00 2001 From: Krzysztof Opasiak Date: Tue, 31 Jan 2017 18:12:31 +0100 Subject: usb: gadget: f_hid: fix: Don't access hidg->req without spinlock held hidg->req should be accessed only with write_spinlock held as it is set to NULL when we get disabled by host. Signed-off-by: Krzysztof Opasiak Signed-off-by: Felipe Balbi --- drivers/usb/gadget/function/f_hid.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 89b48bcc377a..5eea44823ca0 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -367,7 +367,7 @@ try_again: count = min_t(unsigned, count, hidg->report_length); spin_unlock_irqrestore(&hidg->write_spinlock, flags); - status = copy_from_user(hidg->req->buf, buffer, count); + status = copy_from_user(req->buf, buffer, count); if (status != 0) { ERROR(hidg->func.config->cdev, @@ -378,9 +378,9 @@ try_again: spin_lock_irqsave(&hidg->write_spinlock, flags); - /* we our function has been disabled by host */ + /* when our function has been disabled by host */ if (!hidg->req) { - free_ep_req(hidg->in_ep, hidg->req); + free_ep_req(hidg->in_ep, req); /* * TODO * Should we fail with error here? 
@@ -394,7 +394,7 @@ try_again: req->complete = f_hidg_req_complete; req->context = hidg; - status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); + status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC); if (status < 0) { ERROR(hidg->func.config->cdev, "usb_ep_queue error on int endpoint %zd\n", status); -- cgit v1.2.3 From 90400c5884b89a5db232049721ef4dc1ab2f1f1c Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 24 Feb 2017 14:35:54 +0200 Subject: extcon: int3496: Rename GPIO pins in accordance with binding Update GPIO pin names in extcon-intel-int3496.c driver to follow the existing extcon binding. Signed-off-by: Andy Shevchenko Signed-off-by: Chanwoo Choi --- drivers/extcon/extcon-intel-int3496.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c index 38eb6cab938f..81713bf7487e 100644 --- a/drivers/extcon/extcon-intel-int3496.c +++ b/drivers/extcon/extcon-intel-int3496.c @@ -105,13 +105,13 @@ static int int3496_probe(struct platform_device *pdev) return data->usb_id_irq; } - data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en", + data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus", INT3496_GPIO_VBUS_EN, GPIOD_ASIS); if (IS_ERR(data->gpio_vbus_en)) dev_info(dev, "can't request VBUS EN GPIO\n"); - data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux", + data->gpio_usb_mux = devm_gpiod_get_index(dev, "mux", INT3496_GPIO_USB_MUX, GPIOD_ASIS); if (IS_ERR(data->gpio_usb_mux)) -- cgit v1.2.3 From 8cb2cbae45ae46b1e1be16950670d5c5d4408474 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 24 Feb 2017 14:35:55 +0200 Subject: extcon: int3496: Add GPIO ACPI mapping table In order to make GPIO ACPI library stricter prepare users of gpiod_get_index() to correctly behave when there no mapping is provided by firmware. Here we add explicit mapping between _CRS GpioIo() resources and their names used in the driver. 
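For context, a condensed sketch of how such a mapping is consumed (both calls are the ones this driver ends up using, here and in the follow-up commits; error handling trimmed): once the table is registered, each GPIO can be requested by its connection ID, e.g. "id" resolves through the "id-gpios" entry.

	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), acpi_int3496_default_gpios);
	if (ret)
		return ret;	/* probe bails out if the mapping cannot be added */

	data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);	/* matches "id-gpios" */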
Signed-off-by: Andy Shevchenko Signed-off-by: Chanwoo Choi --- Documentation/extcon/intel-int3496.txt | 5 +++++ drivers/extcon/extcon-intel-int3496.c | 20 ++++++++++++++++++++ 2 files changed, 25 insertions(+) (limited to 'drivers') diff --git a/Documentation/extcon/intel-int3496.txt b/Documentation/extcon/intel-int3496.txt index af0b366c25b7..8155dbc7fad3 100644 --- a/Documentation/extcon/intel-int3496.txt +++ b/Documentation/extcon/intel-int3496.txt @@ -20,3 +20,8 @@ Index 1: The output gpio for enabling Vbus output from the device to the otg Index 2: The output gpio for muxing of the data pins between the USB host and the USB peripheral controller, write 1 to mux to the peripheral controller + +There is a mapping between indices and GPIO connection IDs as follows + id index 0 + vbus index 1 + mux index 2 diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c index 81713bf7487e..22a529eb1b1a 100644 --- a/drivers/extcon/extcon-intel-int3496.c +++ b/drivers/extcon/extcon-intel-int3496.c @@ -45,6 +45,17 @@ static const unsigned int int3496_cable[] = { EXTCON_NONE, }; +static const struct acpi_gpio_params id_gpios = { INT3496_GPIO_USB_ID, 0, false }; +static const struct acpi_gpio_params vbus_gpios = { INT3496_GPIO_VBUS_EN, 0, false }; +static const struct acpi_gpio_params mux_gpios = { INT3496_GPIO_USB_MUX, 0, false }; + +static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = { + { "id-gpios", &id_gpios, 1 }, + { "vbus-gpios", &vbus_gpios, 1 }, + { "mux-gpios", &mux_gpios, 1 }, + { }, +}; + static void int3496_do_usb_id(struct work_struct *work) { struct int3496_data *data = @@ -83,6 +94,13 @@ static int int3496_probe(struct platform_device *pdev) struct int3496_data *data; int ret; + ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), + acpi_int3496_default_gpios); + if (ret) { + dev_err(dev, "can't add GPIO ACPI mapping\n"); + return ret; + } + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; @@ -154,6 +172,8 @@ static int int3496_remove(struct platform_device *pdev) devm_free_irq(&pdev->dev, data->usb_id_irq, data); cancel_delayed_work_sync(&data->work); + acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev)); + return 0; } -- cgit v1.2.3 From 059c7874b87575e48d2c7702849ff56017059195 Mon Sep 17 00:00:00 2001 From: Peter Robinson Date: Thu, 2 Mar 2017 18:10:10 +0000 Subject: extcon: int3496: Add dependency on X86 as it's Intel specific Add dependency on X86 so it doesn't show up on other arches and add a option for compile test so it still gets build coverage. Signed-off-by: Peter Robinson Signed-off-by: Chanwoo Choi --- drivers/extcon/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index 96bbae579c0b..fc09c76248b4 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -44,7 +44,7 @@ config EXTCON_GPIO config EXTCON_INTEL_INT3496 tristate "Intel INT3496 ACPI device extcon driver" - depends on GPIOLIB && ACPI + depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST) help Say Y here to enable extcon support for USB OTG ports controlled by an Intel INT3496 ACPI device. -- cgit v1.2.3 From 408c5b41d215b317ab58c98b959454407ee4a53d Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 10 Mar 2017 21:52:08 +0100 Subject: extcon: int3496: Use gpiod_get instead of gpiod_get_index Now that we've an acpi mapping table we should be using gpiod_get instead of gpiod_get_index. 
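The difference amounts to dropping the positional index once the connection IDs exist; schematically (the actual driver change is in the diff below):

	/* before: positional lookup, tied to the GpioIo() ordering in _CRS */
	desc = devm_gpiod_get_index(dev, "vbus", INT3496_GPIO_VBUS_EN, GPIOD_ASIS);

	/* after: lookup by connection ID via the "vbus-gpios" mapping entry */
	desc = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);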
Signed-off-by: Hans de Goede Reviewed-by: Andy Shevchenko Signed-off-by: Chanwoo Choi --- drivers/extcon/extcon-intel-int3496.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c index 22a529eb1b1a..00f0e60a00ce 100644 --- a/drivers/extcon/extcon-intel-int3496.c +++ b/drivers/extcon/extcon-intel-int3496.c @@ -108,9 +108,7 @@ static int int3496_probe(struct platform_device *pdev) data->dev = dev; INIT_DELAYED_WORK(&data->work, int3496_do_usb_id); - data->gpio_usb_id = devm_gpiod_get_index(dev, "id", - INT3496_GPIO_USB_ID, - GPIOD_IN); + data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN); if (IS_ERR(data->gpio_usb_id)) { ret = PTR_ERR(data->gpio_usb_id); dev_err(dev, "can't request USB ID GPIO: %d\n", ret); @@ -123,15 +121,11 @@ static int int3496_probe(struct platform_device *pdev) return data->usb_id_irq; } - data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus", - INT3496_GPIO_VBUS_EN, - GPIOD_ASIS); + data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS); if (IS_ERR(data->gpio_vbus_en)) dev_info(dev, "can't request VBUS EN GPIO\n"); - data->gpio_usb_mux = devm_gpiod_get_index(dev, "mux", - INT3496_GPIO_USB_MUX, - GPIOD_ASIS); + data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS); if (IS_ERR(data->gpio_usb_mux)) dev_info(dev, "can't request USB MUX GPIO\n"); -- cgit v1.2.3 From 70216fd937fea79c20705400540fa48f86ddf1c5 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 13 Mar 2017 12:28:34 +0100 Subject: extcon: int3496: Set the id pin to direction-input if necessary With the new more strict ACPI gpio code the dsdt's IoRestriction flags are honored on gpiod_get, but in some dsdt's it is wrong, so explicitly call gpiod_direction_input on the id gpio if necessary. 
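The workaround amounts to not trusting the IoRestriction flag for this particular pin; roughly (the exact hunk follows):

	id = devm_gpiod_get(dev, "id", GPIOD_IN);
	if (IS_ERR(id))
		return PTR_ERR(id);

	/* some DSDTs mark the pin as output; force it back to input */
	if (gpiod_get_direction(id) != GPIOF_DIR_IN)
		gpiod_direction_input(id);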
This fixes the following errors when the int3496 code is used together with the new more strict ACPI gpio code: [ 2382.484415] gpio gpiochip1: (INT33FF:01): gpiochip_lock_as_irq: tried to flag a GPIO set as output for IRQ [ 2382.484425] gpio gpiochip1: (INT33FF:01): unable to lock HW IRQ 3 for IRQ [ 2382.484429] genirq: Failed to request resources for INT3496:00 (irq 174) on irqchip chv-gpio [ 2382.484518] intel-int3496 INT3496:00: can't request IRQ for USB ID GPIO: -22 [ 2382.500359] intel-int3496: probe of INT3496:00 failed with error -22 Signed-off-by: Hans de Goede Reviewed-by: Andy Shevchenko Signed-off-by: Chanwoo Choi --- drivers/extcon/extcon-intel-int3496.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c index 00f0e60a00ce..9d17984bbbd4 100644 --- a/drivers/extcon/extcon-intel-int3496.c +++ b/drivers/extcon/extcon-intel-int3496.c @@ -113,6 +113,9 @@ static int int3496_probe(struct platform_device *pdev) ret = PTR_ERR(data->gpio_usb_id); dev_err(dev, "can't request USB ID GPIO: %d\n", ret); return ret; + } else if (gpiod_get_direction(data->gpio_usb_id) != GPIOF_DIR_IN) { + dev_warn(dev, FW_BUG "USB ID GPIO not in input mode, fixing\n"); + gpiod_direction_input(data->gpio_usb_id); } data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id); -- cgit v1.2.3 From 5003ae1e735e6bfe4679d9bed6846274f322e77e Mon Sep 17 00:00:00 2001 From: Koos Vriezen Date: Wed, 1 Mar 2017 21:02:50 +0100 Subject: iommu/vt-d: Fix NULL pointer dereference in device_to_iommu The function device_to_iommu() in the Intel VT-d driver lacks a NULL-ptr check, resulting in this oops at boot on some platforms: BUG: unable to handle kernel NULL pointer dereference at 00000000000007ab IP: [] device_to_iommu+0x11a/0x1a0 PGD 0 [...] Call Trace: ? find_or_alloc_domain.constprop.29+0x1a/0x300 ? dw_dma_probe+0x561/0x580 [dw_dmac_core] ? __get_valid_domain_for_dev+0x39/0x120 ? __intel_map_single+0x138/0x180 ? intel_alloc_coherent+0xb6/0x120 ? sst_hsw_dsp_init+0x173/0x420 [snd_soc_sst_haswell_pcm] ? mutex_lock+0x9/0x30 ? kernfs_add_one+0xdb/0x130 ? devres_add+0x19/0x60 ? hsw_pcm_dev_probe+0x46/0xd0 [snd_soc_sst_haswell_pcm] ? platform_drv_probe+0x30/0x90 ? driver_probe_device+0x1ed/0x2b0 ? __driver_attach+0x8f/0xa0 ? driver_probe_device+0x2b0/0x2b0 ? bus_for_each_dev+0x55/0x90 ? bus_add_driver+0x110/0x210 ? 0xffffffffa11ea000 ? driver_register+0x52/0xc0 ? 0xffffffffa11ea000 ? do_one_initcall+0x32/0x130 ? free_vmap_area_noflush+0x37/0x70 ? kmem_cache_alloc+0x88/0xd0 ? do_init_module+0x51/0x1c4 ? load_module+0x1ee9/0x2430 ? show_taint+0x20/0x20 ? kernel_read_file+0xfd/0x190 ? SyS_finit_module+0xa3/0xb0 ? do_syscall_64+0x4a/0xb0 ? entry_SYSCALL64_slow_path+0x25/0x25 Code: 78 ff ff ff 4d 85 c0 74 ee 49 8b 5a 10 0f b6 9b e0 00 00 00 41 38 98 e0 00 00 00 77 da 0f b6 eb 49 39 a8 88 00 00 00 72 ce eb 8f <41> f6 82 ab 07 00 00 04 0f 85 76 ff ff ff 0f b6 4d 08 88 0e 49 RIP [] device_to_iommu+0x11a/0x1a0 RSP CR2: 00000000000007ab ---[ end trace 16f974b6d58d0aad ]--- Add the missing pointer check. 
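The fix itself is minimal: test the pointer before dereferencing it, so non-PCI devices (for which pdev stays NULL) simply take the normal path. Roughly:

	struct pci_dev *pdev = dev_is_pci(dev) ? to_pci_dev(dev) : NULL;

	/* dev may not be a PCI device at all, so check pdev before using it */
	if (pdev && pdev->is_virtfn)
		goto got_pdev;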
Fixes: 1c387188c60f53b338c20eee32db055dfe022a9b ("iommu/vt-d: Fix IOMMU lookup for SR-IOV Virtual Functions") Signed-off-by: Koos Vriezen Cc: stable@vger.kernel.org # 4.8.15+ Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 238ad3447712..91d60493b57c 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -916,7 +916,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf * which we used for the IOMMU lookup. Strictly speaking * we could do this for all PCI devices; we only need to * get the BDF# from the scope table for ACPI matches. */ - if (pdev->is_virtfn) + if (pdev && pdev->is_virtfn) goto got_pdev; *bus = drhd->devices[i].bus; -- cgit v1.2.3 From 6e2e0eea072f6f82ac0afbcfa8dc38dcc3a29fa4 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 9 Feb 2017 13:09:08 -0200 Subject: [media] coda/imx-vdoa: platform_driver should not be const The device driver platform is actually written to during registration, for setting the owner field, so platform_driver_register() does not take a const pointer: drivers/media/platform/coda/imx-vdoa.c: In function 'vdoa_driver_init': drivers/media/platform/coda/imx-vdoa.c:333:213: error: passing argument 1 of '__platform_driver_register' discards 'const' qualifier from pointer target type [-Werror=discarded-qualifiers] module_platform_driver(vdoa_driver); In file included from drivers/media/platform/coda/imx-vdoa.c:22:0: include/linux/platform_device.h:199:12: note: expected 'struct platform_driver *' but argument is of type 'const struct platform_driver *' extern int __platform_driver_register(struct platform_driver *, ^~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/media/platform/coda/imx-vdoa.c: In function 'vdoa_driver_exit': drivers/media/platform/coda/imx-vdoa.c:333:626: error: passing argument 1 of 'platform_driver_unregister' discards 'const' qualifier from pointer target type [-Werror=discarded-qualifiers] Remove the modifier again. Fixes: d2fe28feaebb ("[media] coda/imx-vdoa: constify structs") Signed-off-by: Arnd Bergmann Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/platform/coda/imx-vdoa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c index 67fd8ffa60a4..669a4c82f1ff 100644 --- a/drivers/media/platform/coda/imx-vdoa.c +++ b/drivers/media/platform/coda/imx-vdoa.c @@ -321,7 +321,7 @@ static const struct of_device_id vdoa_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, vdoa_dt_ids); -static const struct platform_driver vdoa_driver = { +static struct platform_driver vdoa_driver = { .probe = vdoa_probe, .remove = vdoa_remove, .driver = { -- cgit v1.2.3 From 389e3f0d57e6ab50c207bee5ea2c4a5982a029c2 Mon Sep 17 00:00:00 2001 From: Shailendra Verma Date: Fri, 2 Dec 2016 02:48:01 -0200 Subject: [media] bdisp: Clean up file handle in open() error path The File handle is not yet added in the vdev list.So no need to call v4l2_fh_del(&ctx->fh)if it fails to create control. 
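The underlying rule is that an error path should only undo what has already been set up, in reverse order. A minimal sketch of the corrected open() unwinding (the demo_* helpers are placeholders; the real hunk follows):

static int demo_open(struct file *file)
{
	struct demo_ctx *ctx = demo_ctx_alloc();	/* placeholder allocation */
	int ret;

	v4l2_fh_init(&ctx->fh, video_devdata(file));

	ret = demo_ctrls_create(ctx);
	if (ret)
		goto error_fh;			/* fh initialised but never added */

	v4l2_fh_add(&ctx->fh);
	file->private_data = &ctx->fh;

	ret = demo_hw_alloc(ctx);
	if (ret)
		goto error_ctrls;		/* later failures must also drop the added fh */

	return 0;

error_ctrls:
	demo_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);			/* only valid after v4l2_fh_add() */
error_fh:
	v4l2_fh_exit(&ctx->fh);
	return ret;
}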
Signed-off-by: Shailendra Verma Reviewed-by: Fabien Dessenne Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/platform/sti/bdisp/bdisp-v4l2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c index 823608112d89..7918b928f058 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c +++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c @@ -632,8 +632,8 @@ static int bdisp_open(struct file *file) error_ctrls: bdisp_ctrls_delete(ctx); -error_fh: v4l2_fh_del(&ctx->fh); +error_fh: v4l2_fh_exit(&ctx->fh); bdisp_hw_free_nodes(ctx); mem_ctx: -- cgit v1.2.3 From 24a47426066c8ba16a4db1b20a41d63187281195 Mon Sep 17 00:00:00 2001 From: Thibault Saunier Date: Wed, 1 Feb 2017 18:05:21 -0200 Subject: [media] exynos-gsc: Do not swap cb/cr for semi planar formats In the case of semi planar formats cb and cr are in the same plane in memory, meaning that will be set to 'cb' whatever the format is, and whatever the (packed) order of those components are. Suggested-by: Nicolas Dufresne Signed-off-by: Thibault Saunier Signed-off-by: Javier Martinez Canillas Acked-by: Sylwester Nawrocki Signed-off-by: Mauro Carvalho Chehab --- drivers/media/platform/exynos-gsc/gsc-core.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c index cbb03768f5d7..0f0c389f8897 100644 --- a/drivers/media/platform/exynos-gsc/gsc-core.c +++ b/drivers/media/platform/exynos-gsc/gsc-core.c @@ -861,9 +861,7 @@ int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb, if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) || (frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) || - (frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) || (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) || - (frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) || (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M)) swap(addr->cb, addr->cr); -- cgit v1.2.3 From 7d2aa6b814476a2e2794960f844344519246df72 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Mon, 20 Mar 2017 10:17:56 +0100 Subject: iommu/exynos: Block SYSMMU while invalidating FLPD cache Documentation specifies that SYSMMU should be in blocked state while performing TLB/FLPD cache invalidation, so add needed calls to sysmmu_block/unblock. 
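Schematically, the invalidation now has to be bracketed by the block/unblock pair (the concrete hunks follow in the diff):

	clk_enable(data->clk_master);
	if (sysmmu_block(data)) {		/* put the SYSMMU into blocked state */
		__sysmmu_tlb_invalidate_entry(data, iova, 1);
		sysmmu_unblock(data);		/* resume address translation */
	}
	clk_disable(data->clk_master);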
Fixes: 66a7ed84b345d ("iommu/exynos: Apply workaround of caching fault page table entries") CC: stable@vger.kernel.org # v4.10+ Signed-off-by: Marek Szyprowski Signed-off-by: Joerg Roedel --- drivers/iommu/exynos-iommu.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index a7e0821c9967..32d43f1994e4 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -512,7 +512,10 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data, spin_lock_irqsave(&data->lock, flags); if (data->active && data->version >= MAKE_MMU_VER(3, 3)) { clk_enable(data->clk_master); - __sysmmu_tlb_invalidate_entry(data, iova, 1); + if (sysmmu_block(data)) { + __sysmmu_tlb_invalidate_entry(data, iova, 1); + sysmmu_unblock(data); + } clk_disable(data->clk_master); } spin_unlock_irqrestore(&data->lock, flags); -- cgit v1.2.3 From cd37a296a9f890586665bb8974a8b17ee2f17d6d Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Mon, 20 Mar 2017 10:17:57 +0100 Subject: iommu/exynos: Workaround FLPD cache flush issues for SYSMMU v5 For some unknown reasons, in some cases, FLPD cache invalidation doesn't work properly with SYSMMU v5 controllers found in Exynos5433 SoCs. This can be observed by a firmware crash during initialization phase of MFC video decoder available in the mentioned SoCs when IOMMU support is enabled. To workaround this issue perform a full TLB/FLPD invalidation in case of replacing any first level page descriptors in case of SYSMMU v5. Fixes: 740a01eee9ada ("iommu/exynos: Add support for v5 SYSMMU") CC: stable@vger.kernel.org # v4.10+ Signed-off-by: Marek Szyprowski Tested-by: Andrzej Hajda Signed-off-by: Joerg Roedel --- drivers/iommu/exynos-iommu.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 32d43f1994e4..c01bfcdb2383 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -513,7 +513,10 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data, if (data->active && data->version >= MAKE_MMU_VER(3, 3)) { clk_enable(data->clk_master); if (sysmmu_block(data)) { - __sysmmu_tlb_invalidate_entry(data, iova, 1); + if (data->version >= MAKE_MMU_VER(5, 0)) + __sysmmu_tlb_invalidate(data); + else + __sysmmu_tlb_invalidate_entry(data, iova, 1); sysmmu_unblock(data); } clk_disable(data->clk_master); -- cgit v1.2.3 From 9d3a4de4cb8db8e71730e36736272ef041836f68 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 16 Mar 2017 17:00:16 +0000 Subject: iommu: Disambiguate MSI region types The introduction of reserved regions has left a couple of rough edges which we could do with sorting out sooner rather than later. Since we are not yet addressing the potential dynamic aspect of software-managed reservations and presenting them at arbitrary fixed addresses, it is incongruous that we end up displaying hardware vs. software-managed MSI regions to userspace differently, especially since ARM-based systems may actually require one or the other, or even potentially both at once, (which iommu-dma currently has no hope of dealing with at all). Let's resolve the former user-visible inconsistency ASAP before the ABI has been baked into a kernel release, in a way that also lays the groundwork for the latter shortcoming to be addressed by follow-up patches. 
For clarity, rename the software-managed type to IOMMU_RESV_SW_MSI, use IOMMU_RESV_MSI to describe the hardware type, and document everything a little bit. Since the x86 MSI remapping hardware falls squarely under this meaning of IOMMU_RESV_MSI, apply that type to their regions as well, so that we tell the same story to userspace across all platforms. Secondly, as the various region types require quite different handling, and it really makes little sense to ever try combining them, convert the bitfield-esque #defines to a plain enum in the process before anyone gets the wrong impression. Fixes: d30ddcaa7b02 ("iommu: Add a new type field in iommu_resv_region") Reviewed-by: Eric Auger CC: Alex Williamson CC: David Woodhouse CC: kvm@vger.kernel.org Signed-off-by: Robin Murphy Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 2 +- drivers/iommu/arm-smmu-v3.c | 2 +- drivers/iommu/arm-smmu.c | 2 +- drivers/iommu/intel-iommu.c | 2 +- drivers/iommu/iommu.c | 5 +++-- drivers/vfio/vfio_iommu_type1.c | 7 +++---- include/linux/iommu.h | 18 +++++++++++++----- 7 files changed, 23 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 98940d1392cb..b17536d6e69b 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3202,7 +3202,7 @@ static void amd_iommu_get_resv_regions(struct device *dev, region = iommu_alloc_resv_region(MSI_RANGE_START, MSI_RANGE_END - MSI_RANGE_START + 1, - 0, IOMMU_RESV_RESERVED); + 0, IOMMU_RESV_MSI); if (!region) return; list_add_tail(®ion->list, head); diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 5806a6acc94e..591bb96047c9 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1888,7 +1888,7 @@ static void arm_smmu_get_resv_regions(struct device *dev, int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, - prot, IOMMU_RESV_MSI); + prot, IOMMU_RESV_SW_MSI); if (!region) return; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index abf6496843a6..b493c99e17f7 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1608,7 +1608,7 @@ static void arm_smmu_get_resv_regions(struct device *dev, int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, - prot, IOMMU_RESV_MSI); + prot, IOMMU_RESV_SW_MSI); if (!region) return; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 91d60493b57c..d412a313a372 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -5249,7 +5249,7 @@ static void intel_iommu_get_resv_regions(struct device *device, reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, - 0, IOMMU_RESV_RESERVED); + 0, IOMMU_RESV_MSI); if (!reg) return; list_add_tail(®->list, head); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 8ea14f41a979..3b67144dead2 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -72,6 +72,7 @@ static const char * const iommu_group_resv_type_string[] = { [IOMMU_RESV_DIRECT] = "direct", [IOMMU_RESV_RESERVED] = "reserved", [IOMMU_RESV_MSI] = "msi", + [IOMMU_RESV_SW_MSI] = "msi", }; #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ @@ -1743,8 +1744,8 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list) } struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, - size_t length, - int prot, int type) 
+ size_t length, int prot, + enum iommu_resv_type type) { struct iommu_resv_region *region; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index c26fa1f3ed86..32d2633092a3 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -1182,8 +1182,7 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain, return NULL; } -static bool vfio_iommu_has_resv_msi(struct iommu_group *group, - phys_addr_t *base) +static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base) { struct list_head group_resv_regions; struct iommu_resv_region *region, *next; @@ -1192,7 +1191,7 @@ static bool vfio_iommu_has_resv_msi(struct iommu_group *group, INIT_LIST_HEAD(&group_resv_regions); iommu_get_group_resv_regions(group, &group_resv_regions); list_for_each_entry(region, &group_resv_regions, list) { - if (region->type & IOMMU_RESV_MSI) { + if (region->type == IOMMU_RESV_SW_MSI) { *base = region->start; ret = true; goto out; @@ -1283,7 +1282,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, if (ret) goto out_domain; - resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base); + resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base); INIT_LIST_HEAD(&domain->group_list); list_add(&group->next, &domain->group_list); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 6a6de187ddc0..2e4de0deee53 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -125,9 +125,16 @@ enum iommu_attr { }; /* These are the possible reserved region types */ -#define IOMMU_RESV_DIRECT (1 << 0) -#define IOMMU_RESV_RESERVED (1 << 1) -#define IOMMU_RESV_MSI (1 << 2) +enum iommu_resv_type { + /* Memory regions which must be mapped 1:1 at all times */ + IOMMU_RESV_DIRECT, + /* Arbitrary "never map this or give it to a device" address ranges */ + IOMMU_RESV_RESERVED, + /* Hardware MSI region (untranslated) */ + IOMMU_RESV_MSI, + /* Software-managed MSI translation window */ + IOMMU_RESV_SW_MSI, +}; /** * struct iommu_resv_region - descriptor for a reserved memory region @@ -142,7 +149,7 @@ struct iommu_resv_region { phys_addr_t start; size_t length; int prot; - int type; + enum iommu_resv_type type; }; #ifdef CONFIG_IOMMU_API @@ -288,7 +295,8 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); extern int iommu_request_dm_for_dev(struct device *dev); extern struct iommu_resv_region * -iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type); +iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, + enum iommu_resv_type type); extern int iommu_get_group_resv_regions(struct iommu_group *group, struct list_head *head); -- cgit v1.2.3 From 210c4f70b4c630b27f0840c8043c138c955edc9e Mon Sep 17 00:00:00 2001 From: hayeswang Date: Mon, 20 Mar 2017 16:13:44 +0800 Subject: r8152: set the RMS of RTL8153 according to the mtu Set the received maximum size (RMS) according to the mtu size. It is unnecessary to receive a packet which is more than the size we could transmit. Besides, this could let the rx buffer be used effectively. Signed-off-by: Hayes Wang Signed-off-by: David S. 
Miller --- drivers/net/usb/r8152.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index bb3eedd07fbe..525c25817013 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -2899,7 +2899,8 @@ static void r8153_first_init(struct r8152 *tp) rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS); + ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); @@ -2951,7 +2952,8 @@ static void r8153_enter_oob(struct r8152 *tp) usleep_range(1000, 2000); } - ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS); + ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG); ocp_data &= ~TEREDO_WAKE_MASK; @@ -4201,8 +4203,14 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) dev->mtu = new_mtu; - if (netif_running(dev) && netif_carrier_ok(dev)) - r8153_set_rx_early_size(tp); + if (netif_running(dev)) { + u32 rms = new_mtu + VLAN_ETH_HLEN + CRC_SIZE; + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms); + + if (netif_carrier_ok(dev)) + r8153_set_rx_early_size(tp); + } mutex_unlock(&tp->control); -- cgit v1.2.3 From b20cb60e2b865638459e6ec82ad3536d3734e555 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Mon, 20 Mar 2017 16:13:45 +0800 Subject: r8152: fix the rx early size of RTL8153 revert commit a59e6d815226 ("r8152: correct the rx early size") and fix the rx early size as (rx buffer size - rx packet size - rx desc size - alignment) / 4 Signed-off-by: Hayes Wang Signed-off-by: David S. Miller --- drivers/net/usb/r8152.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 525c25817013..0b1b9188625d 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -32,7 +32,7 @@ #define NETNEXT_VERSION "08" /* Information for net */ -#define NET_VERSION "8" +#define NET_VERSION "9" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers " @@ -501,6 +501,8 @@ enum rtl_register_content { #define RTL8153_RMS RTL8153_MAX_PACKET #define RTL8152_TX_TIMEOUT (5 * HZ) #define RTL8152_NAPI_WEIGHT 64 +#define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + CRC_SIZE + \ + sizeof(struct rx_desc) + RX_ALIGN) /* rtl8152 flags */ enum rtl8152_flags { @@ -2253,8 +2255,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp) static void r8153_set_rx_early_size(struct r8152 *tp) { - u32 mtu = tp->netdev->mtu; - u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8; + u32 ocp_data = (agg_buf_sz - rx_reserved_size(tp->netdev->mtu)) / 4; ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data); } -- cgit v1.2.3 From be9ca0d33c850192198c22518eeb1f41401268e8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 20 Mar 2017 09:52:50 +0100 Subject: cpsw/netcp: work around reverse cpts dependency The dependency is reversed: cpsw and netcp call into cpts, but cpts depends on the other two in Kconfig. 
This can lead to cpts being a loadable module and its callers built-in: drivers/net/ethernet/ti/cpsw.o: In function `cpsw_remove': cpsw.c:(.text.cpsw_remove+0xd0): undefined reference to `cpts_release' drivers/net/ethernet/ti/cpsw.o: In function `cpsw_rx_handler': cpsw.c:(.text.cpsw_rx_handler+0x2dc): undefined reference to `cpts_rx_timestamp' drivers/net/ethernet/ti/cpsw.o: In function `cpsw_tx_handler': cpsw.c:(.text.cpsw_tx_handler+0x7c): undefined reference to `cpts_tx_timestamp' drivers/net/ethernet/ti/cpsw.o: In function `cpsw_ndo_stop': As a workaround, I'm introducing another Kconfig symbol to control the compilation of cpts, while making the actual module controlled by a silent symbol that is =y when necessary. Fixes: 6246168b4a38 ("net: ethernet: ti: netcp: add support of cpts") Signed-off-by: Arnd Bergmann Reviewed-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/Kconfig | 8 +++++++- drivers/net/ethernet/ti/Makefile | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 296c8efd0038..d923890a9fda 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -74,7 +74,7 @@ config TI_CPSW will be called cpsw. config TI_CPTS - tristate "TI Common Platform Time Sync (CPTS) Support" + bool "TI Common Platform Time Sync (CPTS) Support" depends on TI_CPSW || TI_KEYSTONE_NETCP imply PTP_1588_CLOCK ---help--- @@ -83,6 +83,12 @@ config TI_CPTS The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the driver offers a PTP Hardware Clock. +config TI_CPTS_MOD + tristate + depends on TI_CPTS + default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y + default m + config TI_KEYSTONE_NETCP tristate "TI Keystone NETCP Core Support" select TI_CPSW_ALE diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index 1e7c10bf8713..10e6b0ce51ba 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -12,7 +12,7 @@ obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o -obj-$(CONFIG_TI_CPTS) += cpts.o +obj-$(CONFIG_TI_CPTS_MOD) += cpts.o obj-$(CONFIG_TI_CPSW) += ti_cpsw.o ti_cpsw-y := cpsw.o -- cgit v1.2.3 From 07fef3623407444e51c12ea57cd91df38c1069e0 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 20 Mar 2017 09:58:33 +0100 Subject: cpsw/netcp: cpts depends on posix_timers With posix timers having become optional, we get a build error with the cpts time sync option of the CPSW driver: drivers/net/ethernet/ti/cpts.c: In function 'cpts_find_ts': drivers/net/ethernet/ti/cpts.c:291:23: error: implicit declaration of function 'ptp_classify_raw';did you mean 'ptp_classifier_init'? [-Werror=implicit-function-declaration] This adds a hard dependency on PTP_CLOCK to avoid the problem, as building it without PTP support makes no sense anyway. Fixes: baa73d9e478f ("posix-timers: Make them configurable") Cc: Nicolas Pitre Cc: stable@vger.kernel.org Signed-off-by: Arnd Bergmann Acked-by: Nicolas Pitre Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index d923890a9fda..9e631952b86f 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -76,7 +76,7 @@ config TI_CPSW config TI_CPTS bool "TI Common Platform Time Sync (CPTS) Support" depends on TI_CPSW || TI_KEYSTONE_NETCP - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK ---help--- This driver supports the Common Platform Time Sync unit of the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem. -- cgit v1.2.3 From 1f30a86c58093046dc3e49c23d2618894e098f7a Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 21 Mar 2017 15:59:12 +0200 Subject: net/mlx5: Add missing entries for set/query rate limit commands The switch cases for the rate limit set and query commands were missing, which could get us wrong under fw error or driver reset flow, fix that. Fixes: 1466cc5b23d1 ('net/mlx5: Rate limit tables support') Signed-off-by: Or Gerlitz Reviewed-by: Hadar Hen Zion Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index caa837e5e2b9..a380353a78c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -361,6 +361,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_QUERY_VPORT_COUNTER: case MLX5_CMD_OP_ALLOC_Q_COUNTER: case MLX5_CMD_OP_QUERY_Q_COUNTER: + case MLX5_CMD_OP_SET_RATE_LIMIT: + case MLX5_CMD_OP_QUERY_RATE_LIMIT: case MLX5_CMD_OP_ALLOC_PD: case MLX5_CMD_OP_ALLOC_UAR: case MLX5_CMD_OP_CONFIG_INT_MODERATION: @@ -497,6 +499,8 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); + MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT); + MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT); MLX5_COMMAND_STR_CASE(ALLOC_PD); MLX5_COMMAND_STR_CASE(DEALLOC_PD); MLX5_COMMAND_STR_CASE(ALLOC_UAR); -- cgit v1.2.3 From d85cdccbb3fe9a632ec9d0f4e4526c8c84fc3523 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 21 Mar 2017 15:59:13 +0200 Subject: net/mlx5e: Change the TC offload rule add/del code path to be per NIC or E-Switch Refactor the code to deal with add/del TC rules to have handler per NIC/E-switch offloading use case, and push the latter into the e-switch code. This provides better separation and is to be used in down-stream patch for applying a fix. Fixes: bffaa916588e ("net/mlx5: E-Switch, Add control for inline mode") Signed-off-by: Or Gerlitz Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 59 ++++++++++++++-------- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 5 ++ .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 14 +++++ 3 files changed, 58 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 79481f4cf264..2825b5665456 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -133,6 +133,23 @@ err_create_ft: return rule; } +static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + struct mlx5_fc *counter = NULL; + + if (!IS_ERR(flow->rule)) { + counter = mlx5_flow_rule_counter(flow->rule); + mlx5_del_flow_rules(flow->rule); + mlx5_fc_destroy(priv->mdev, counter); + } + + if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { + mlx5_destroy_flow_table(priv->fs.tc.t); + priv->fs.tc.t = NULL; + } +} + static struct mlx5_flow_handle * mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, @@ -149,7 +166,24 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, } static void mlx5e_detach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow) { + struct mlx5e_tc_flow *flow); + +static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr); + + mlx5_eswitch_del_vlan_action(esw, flow->attr); + + if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) + mlx5e_detach_encap(priv, flow); +} + +static void mlx5e_detach_encap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ struct list_head *next = flow->encap.next; list_del(&flow->encap); @@ -173,25 +207,10 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv, static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5_fc *counter = NULL; - - if (!IS_ERR(flow->rule)) { - counter = mlx5_flow_rule_counter(flow->rule); - mlx5_del_flow_rules(flow->rule); - mlx5_fc_destroy(priv->mdev, counter); - } - - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { - mlx5_eswitch_del_vlan_action(esw, flow->attr); - if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) - mlx5e_detach_encap(priv, flow); - } - - if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { - mlx5_destroy_flow_table(priv->fs.tc.t); - priv->fs.tc.t = NULL; - } + if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + mlx5e_tc_del_fdb_flow(priv, flow); + else + mlx5e_tc_del_nic_flow(priv, flow); } static void parse_vxlan_attr(struct mlx5_flow_spec *spec, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 5b78883d5654..9227a83a97e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -271,6 +271,11 @@ struct mlx5_flow_handle * mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_esw_flow_attr *attr); +void +mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_handle *rule, + struct mlx5_esw_flow_attr *attr); + struct mlx5_flow_handle * mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 
b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 4f5b0d47d5f3..bfabefe20ac0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -97,6 +97,20 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, return rule; } +void +mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_handle *rule, + struct mlx5_esw_flow_attr *attr) +{ + struct mlx5_fc *counter = NULL; + + if (!IS_ERR(rule)) { + counter = mlx5_flow_rule_counter(rule); + mlx5_del_flow_rules(rule); + mlx5_fc_destroy(esw->dev, counter); + } +} + static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) { struct mlx5_eswitch_rep *rep; -- cgit v1.2.3 From 375f51e2b5b7b9a42b3139aea519cbb1bfc5d6ef Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Tue, 21 Mar 2017 15:59:14 +0200 Subject: net/mlx5: E-Switch, Don't allow changing inline mode when flows are configured Changing the eswitch inline mode can potentially cause already configured flows not to match the policy. E.g. set policy L4, add some L4 rules, set policy to L2 --> bad! Hence we disallow it. Keep track of how many offloaded rules are now set and refuse inline mode changes if this isn't zero. Fixes: bffaa916588e ("net/mlx5: E-Switch, Add control for inline mode") Signed-off-by: Roi Dayan Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 8 ++++++++ 2 files changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 9227a83a97e3..ad329b1680b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -209,6 +209,7 @@ struct mlx5_esw_offload { struct mlx5_eswitch_rep *vport_reps; DECLARE_HASHTABLE(encap_tbl, 8); u8 inline_mode; + u64 num_flows; }; struct mlx5_eswitch { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index bfabefe20ac0..307ec6c5fd3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -93,6 +93,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, spec, &flow_act, dest, i); if (IS_ERR(rule)) mlx5_fc_destroy(esw->dev, counter); + else + esw->offloads.num_flows++; return rule; } @@ -108,6 +110,7 @@ mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, counter = mlx5_flow_rule_counter(rule); mlx5_del_flow_rules(rule); mlx5_fc_destroy(esw->dev, counter); + esw->offloads.num_flows--; } } @@ -922,6 +925,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) return -EOPNOTSUPP; + if (esw->offloads.num_flows > 0) { + esw_warn(dev, "Can't set inline mode when flows are configured\n"); + return -EOPNOTSUPP; + } + err = esw_inline_mode_from_devlink(mode, &mlx5_mode); if (err) goto out; -- cgit v1.2.3 From 09c91ddf2cd33489c2c14edfef43ae38d412888e Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 21 Mar 2017 15:59:15 +0200 Subject: net/mlx5e: Use the proper UAPI values when offloading TC vlan actions Currently we use the non UAPI values and we miss erring on the modify action which is not supported, fix that. 
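Put differently, tcf_vlan_action() reports the UAPI TCA_VLAN_ACT_* values, so those are the constants the offload code has to test, and anything else (such as TCA_VLAN_ACT_MODIFY) must be rejected. A rough sketch of the intent; the actual patch keeps the if/else form shown in the diff below:

	if (is_tcf_vlan(a)) {
		switch (tcf_vlan_action(a)) {
		case TCA_VLAN_ACT_POP:
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			break;
		case TCA_VLAN_ACT_PUSH:
			if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
			attr->vlan = tcf_vlan_push_vid(a);
			break;
		default:	/* TCA_VLAN_ACT_MODIFY and anything newer */
			return -EOPNOTSUPP;
		}
	}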
Fixes: 8b32580df1cb ('net/mlx5e: Add TC vlan action for SRIOV offloads') Signed-off-by: Or Gerlitz Reported-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 2825b5665456..9c13abaf3885 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1131,14 +1131,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } if (is_tcf_vlan(a)) { - if (tcf_vlan_action(a) == VLAN_F_POP) { + if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; - } else if (tcf_vlan_action(a) == VLAN_F_PUSH) { + } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) return -EOPNOTSUPP; attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; attr->vlan = tcf_vlan_push_vid(a); + } else { /* action is TCA_VLAN_ACT_MODIFY */ + return -EOPNOTSUPP; } continue; } -- cgit v1.2.3 From 1ad9a00ae0efc2e9337148d6c382fad3d27bf99a Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Tue, 21 Mar 2017 15:59:16 +0200 Subject: net/mlx5e: Avoid supporting udp tunnel port ndo for VF reps This was added to allow the TC offloading code to identify offloading encap/decap vxlan rules. The VF reps are effectively related to the same mlx5 PCI device as the PF. Since the kernel invokes the (say) delete ndo for each netdev, the FW erred on multiple vxlan dst port deletes when the port was deleted from the system. We fix that by keeping the registration to be carried out only by the PF. Since the PF serves as the uplink device, the VF reps will look up a port there and realize if they are ok to offload that. Tested: ip link add vxlan1 type vxlan id 44 dev ens5f0 dstport 9999 ip link set vxlan1 up ip link del dev vxlan1 Fixes: 4a25730eb202 ('net/mlx5e: Add ndo_udp_tunnel_add to VF representors') Signed-off-by: Paul Blakey Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 4 ---- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 ++++---- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 -- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 9 +++++++-- 4 files changed, 11 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index f6a6ded204f6..dc52053128bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -928,10 +928,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv); int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); -void mlx5e_add_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti); -void mlx5e_del_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti); int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, void *sp); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8ef64c4db2c2..66c133757a5e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3100,8 +3100,8 @@ static int mlx5e_get_vf_stats(struct net_device *dev, vf_stats); } -void mlx5e_add_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +static void mlx5e_add_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3114,8 +3114,8 @@ void mlx5e_add_vxlan_port(struct net_device *netdev, mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); } -void mlx5e_del_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +static void mlx5e_del_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 2c864574a9d5..f621373bd7a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -393,8 +393,6 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, .ndo_setup_tc = mlx5e_rep_ndo_setup_tc, .ndo_get_stats64 = mlx5e_rep_get_stats, - .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, - .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, .ndo_has_offload_stats = mlx5e_has_offload_stats, .ndo_get_offload_stats = mlx5e_get_offload_stats, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9c13abaf3885..fade7233dac5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -267,12 +267,15 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS, f->mask); + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw); + struct mlx5e_priv *up_priv = netdev_priv(up_dev); /* Full udp dst port must be given */ if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) goto vxlan_match_offload_err; - if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) 
&& + if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) parse_vxlan_attr(spec, f); else { @@ -995,6 +998,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr *attr) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw); + struct mlx5e_priv *up_priv = netdev_priv(up_dev); unsigned short family = ip_tunnel_info_af(tun_info); struct ip_tunnel_key *key = &tun_info->key; struct mlx5_encap_entry *e; @@ -1015,7 +1020,7 @@ vxlan_encap_offload_err: return -EOPNOTSUPP; } - if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) && + if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { tunnel_type = MLX5_HEADER_TYPE_VXLAN; } else { -- cgit v1.2.3 From 5f40b4ed975c26016cf41953b7510fe90718e21c Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 21 Mar 2017 15:59:17 +0200 Subject: net/mlx5: Increase number of max QPs in default profile With ConnectX-4 sharing SRQs from the same space as QPs, we hit a limit preventing some applications to allocate needed QPs amount. Double the size to 256K. Fixes: e126ba97dba9e ('mlx5: Add driver for Mellanox Connect-IB adapters') Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index e2bd600d19de..60154a175bd3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -87,7 +87,7 @@ static struct mlx5_profile profile[] = { [2] = { .mask = MLX5_PROF_MASK_QP_SIZE | MLX5_PROF_MASK_MR_CACHE, - .log_max_qp = 17, + .log_max_qp = 18, .mr_cache[0] = { .size = 500, .limit = 250 -- cgit v1.2.3 From d3a4e4da54c7adb420d5f48e89be913b14bdeff1 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 21 Mar 2017 15:59:18 +0200 Subject: net/mlx5e: Count GSO packets correctly TX packets statistics ('tx_packets' counter) used to count GSO packets as one, even though it contains multiple segments. This patch will increment the counter by the number of segments, and align the driver with the behavior of other drivers in the stack. Note that no information is lost in this patch due to 'tx_tso_packets' counter existence. Before, ethtool showed: $ ethtool -S ens6 | egrep "tx_packets|tx_tso_packets" tx_packets: 61340 tx_tso_packets: 60954 tx_packets_phy: 2451115 Now, we will see the more logical statistics: $ ethtool -S ens6 | egrep "tx_packets|tx_tso_packets" tx_packets: 2451115 tx_tso_packets: 60954 tx_packets_phy: 2451115 Fixes: e586b3b0baee ("net/mlx5: Ethernet Datapath files") Signed-off-by: Gal Pressman Cc: kernel-team@fb.com Signed-off-by: Saeed Mahameed Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index f193128bac4b..57f5e2d7ebd1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -274,15 +274,18 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) sq->stats.tso_bytes += skb->len - ihs; } + sq->stats.packets += skb_shinfo(skb)->gso_segs; num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; } else { bf = sq->bf_budget && !skb->xmit_more && !skb_shinfo(skb)->nr_frags; ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); + sq->stats.packets++; num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); } + sq->stats.bytes += num_bytes; wi->num_bytes = num_bytes; ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; @@ -381,8 +384,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) if (bf) sq->bf_budget--; - sq->stats.packets++; - sq->stats.bytes += num_bytes; return NETDEV_TX_OK; dma_unmap_wqe_err: -- cgit v1.2.3 From 8ab7e2ae15d84ba758b2c8c6f4075722e9bd2a08 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 21 Mar 2017 15:59:19 +0200 Subject: net/mlx5e: Count LRO packets correctly RX packets statistics ('rx_packets' counter) used to count LRO packets as one, even though it contains multiple segments. This patch will increment the counter by the number of segments, and align the driver with the behavior of other drivers in the stack. Note that no information is lost in this patch due to 'rx_lro_packets' counter existence. Before, ethtool showed: $ ethtool -S ens6 | egrep "rx_packets|rx_lro_packets" rx_packets: 435277 rx_lro_packets: 35847 rx_packets_phy: 1935066 Now, we will see the more logical statistics: $ ethtool -S ens6 | egrep "rx_packets|rx_lro_packets" rx_packets: 1935066 rx_lro_packets: 35847 rx_packets_phy: 1935066 Fixes: e586b3b0baee ("net/mlx5: Ethernet Datapath files") Signed-off-by: Gal Pressman Cc: kernel-team@fb.com Signed-off-by: Saeed Mahameed Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 3d371688fbbb..bafcb349a50c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -601,6 +601,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, if (lro_num_seg > 1) { mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); + /* Subtract one since we already counted this as one + * "regular" packet in mlx5e_complete_rx_cqe() + */ + rq->stats.packets += lro_num_seg - 1; rq->stats.lro_packets++; rq->stats.lro_bytes += cqe_bcnt; } -- cgit v1.2.3 From ac23d3cac1f339febd95c403a245ae072dfd0e84 Mon Sep 17 00:00:00 2001 From: Yasuaki Ishimatsu Date: Tue, 21 Mar 2017 11:44:25 -0400 Subject: fjes: Do not load fjes driver if system does not have extended socket device. The fjes driver is used only by FUJITSU servers and almost of all servers in the world never use it. 
But currently if ACPI PNP0C02 is defined in the ACPI table, the following message is always shown: "FUJITSU Extended Socket Network Device Driver - version 1.2 - Copyright (c) 2015 FUJITSU LIMITED" The message makes users confused because there is no reason that the message is shown in other vendor servers. To avoid the confusion, the patch adds a check that the server has a extended socket device or not. Signed-off-by: Yasuaki Ishimatsu CC: Taku Izumi Signed-off-by: David S. Miller --- drivers/net/fjes/fjes_main.c | 52 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 47 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index c4b3c4b77a9c..7b589649ab46 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -45,6 +45,8 @@ MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02" + static int fjes_request_irq(struct fjes_adapter *); static void fjes_free_irq(struct fjes_adapter *); @@ -78,7 +80,7 @@ static void fjes_rx_irq(struct fjes_adapter *, int); static int fjes_poll(struct napi_struct *, int); static const struct acpi_device_id fjes_acpi_ids[] = { - {"PNP0C02", 0}, + {ACPI_MOTHERBOARD_RESOURCE_HID, 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids); @@ -115,18 +117,17 @@ static struct resource fjes_resource[] = { }, }; -static int fjes_acpi_add(struct acpi_device *device) +static bool is_extended_socket_device(struct acpi_device *device) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1]; - struct platform_device *plat_dev; union acpi_object *str; acpi_status status; int result; status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer); if (ACPI_FAILURE(status)) - return -ENODEV; + return false; str = buffer.pointer; result = utf16s_to_utf8s((wchar_t *)str->string.pointer, @@ -136,10 +137,21 @@ static int fjes_acpi_add(struct acpi_device *device) if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) { kfree(buffer.pointer); - return -ENODEV; + return false; } kfree(buffer.pointer); + return true; +} + +static int fjes_acpi_add(struct acpi_device *device) +{ + struct platform_device *plat_dev; + acpi_status status; + + if (!is_extended_socket_device(device)) + return -ENODEV; + status = acpi_walk_resources(device->handle, METHOD_NAME__CRS, fjes_get_acpi_resource, fjes_resource); if (ACPI_FAILURE(status)) @@ -1473,11 +1485,41 @@ static void fjes_watch_unshare_task(struct work_struct *work) } } +static acpi_status +acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level, + void *context, void **return_value) +{ + struct acpi_device *device; + bool *found = context; + int result; + + result = acpi_bus_get_device(obj_handle, &device); + if (result) + return AE_OK; + + if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID)) + return AE_OK; + + if (!is_extended_socket_device(device)) + return AE_OK; + + *found = true; + return AE_CTRL_TERMINATE; +} + /* fjes_init_module - Driver Registration Routine */ static int __init fjes_init_module(void) { + bool found = false; int result; + acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, + acpi_find_extended_socket_device, NULL, &found, + NULL); + + if (!found) + return -ENODEV; + pr_info("%s - version %s - %s\n", fjes_driver_string, fjes_driver_version, fjes_copyright); -- cgit v1.2.3 From 
2b396d302650f1ebb770ed758ddcf5a64328ffd5 Mon Sep 17 00:00:00 2001 From: Yasuaki Ishimatsu Date: Tue, 21 Mar 2017 11:46:35 -0400 Subject: fjes: Do not load fjes driver if extended socket device is not power on. The extended device socket cannot turn on/off while system is running. So when system boots up and the device is not power on, the fjes driver does not need be loaded. To check the status of the device, the patch adds ACPI _STA method check. Signed-off-by: Yasuaki Ishimatsu CC: Taku Izumi Signed-off-by: David S. Miller --- drivers/net/fjes/fjes_main.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers') diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 7b589649ab46..ae48c809bac9 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -144,6 +144,24 @@ static bool is_extended_socket_device(struct acpi_device *device) return true; } +static int acpi_check_extended_socket_status(struct acpi_device *device) +{ + unsigned long long sta; + acpi_status status; + + status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta); + if (ACPI_FAILURE(status)) + return -ENODEV; + + if (!((sta & ACPI_STA_DEVICE_PRESENT) && + (sta & ACPI_STA_DEVICE_ENABLED) && + (sta & ACPI_STA_DEVICE_UI) && + (sta & ACPI_STA_DEVICE_FUNCTIONING))) + return -ENODEV; + + return 0; +} + static int fjes_acpi_add(struct acpi_device *device) { struct platform_device *plat_dev; @@ -152,6 +170,9 @@ static int fjes_acpi_add(struct acpi_device *device) if (!is_extended_socket_device(device)) return -ENODEV; + if (acpi_check_extended_socket_status(device)) + return -ENODEV; + status = acpi_walk_resources(device->handle, METHOD_NAME__CRS, fjes_get_acpi_resource, fjes_resource); if (ACPI_FAILURE(status)) @@ -1503,6 +1524,9 @@ acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level, if (!is_extended_socket_device(device)) return AE_OK; + if (acpi_check_extended_socket_status(device)) + return AE_OK; + *found = true; return AE_CTRL_TERMINATE; } -- cgit v1.2.3 From 31739eae738ccbe8b9d627c3f2251017ca03f4d2 Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Tue, 21 Mar 2017 14:01:06 -0700 Subject: net: bcmgenet: remove bcmgenet_internal_phy_setup() Commit 6ac3ce8295e6 ("net: bcmgenet: Remove excessive PHY reset") removed the bcmgenet_mii_reset() function from bcmgenet_power_up() and bcmgenet_internal_phy_setup() functions. In so doing it broke the reset of the internal PHY devices used by the GENETv1-GENETv3 which required this reset before the UniMAC was enabled. It also broke the internal GPHY devices used by the GENETv4 because the config_init that installed the AFE workaround was no longer occurring after the reset of the GPHY performed by bcmgenet_phy_power_set() in bcmgenet_internal_phy_setup(). In addition the code in bcmgenet_internal_phy_setup() related to the "enable APD" comment goes with the bcmgenet_mii_reset() so it should have also been removed. Commit bd4060a6108b ("net: bcmgenet: Power on integrated GPHY in bcmgenet_power_up()") moved the bcmgenet_phy_power_set() call to the bcmgenet_power_up() function, but failed to remove it from the bcmgenet_internal_phy_setup() function. Had it done so, the bcmgenet_internal_phy_setup() function would have been empty and could have been removed at that time. Commit 5dbebbb44a6a ("net: bcmgenet: Software reset EPHY after power on") was submitted to correct the functional problems introduced by commit 6ac3ce8295e6 ("net: bcmgenet: Remove excessive PHY reset"). 
It was included in v4.4 and made available on 4.3-stable. Unfortunately, it didn't fully revert the commit because this bcmgenet_mii_reset() doesn't apply the soft reset to the internal GPHY used by GENETv4 like the previous one did. This prevents the restoration of the AFE work- arounds for internal GPHY devices after the bcmgenet_phy_power_set() in bcmgenet_internal_phy_setup(). This commit takes the alternate approach of removing the unnecessary bcmgenet_internal_phy_setup() function which shouldn't have been in v4.3 so that when bcmgenet_mii_reset() was restored it should have only gone into bcmgenet_power_up(). This will avoid the problems while also removing the redundancy (and hopefully some of the confusion). Fixes: 6ac3ce8295e6 ("net: bcmgenet: Remove excessive PHY reset") Signed-off-by: Doug Berger Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmmii.c | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index e87607621e62..2f9281936f0e 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -220,20 +220,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable) udelay(60); } -static void bcmgenet_internal_phy_setup(struct net_device *dev) -{ - struct bcmgenet_priv *priv = netdev_priv(dev); - u32 reg; - - /* Power up PHY */ - bcmgenet_phy_power_set(dev, true); - /* enable APD */ - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - reg |= EXT_PWR_DN_EN_LD; - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - bcmgenet_mii_reset(dev); -} - static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) { u32 reg; @@ -281,7 +267,6 @@ int bcmgenet_mii_config(struct net_device *dev) if (priv->internal_phy) { phy_name = "internal PHY"; - bcmgenet_internal_phy_setup(dev); } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { phy_name = "MoCA"; bcmgenet_moca_phy_setup(priv); -- cgit v1.2.3 From 6e9e6cc8f4e4f2cd67931510c9f39abf3d9e0d3b Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 20 Mar 2017 15:31:10 -0700 Subject: Bluetooth: btqcomsmd: fix compile-test dependency compile-testing fails when QCOM_SMD is a loadable module: drivers/bluetooth/built-in.o: In function `btqcomsmd_send': btqca.c:(.text+0xa8): undefined reference to `qcom_smd_send' drivers/bluetooth/built-in.o: In function `btqcomsmd_probe': btqca.c:(.text+0x3ec): undefined reference to `qcom_wcnss_open_channel' btqca.c:(.text+0x46c): undefined reference to `qcom_smd_set_drvdata' This clarifies the dependency to allow compile-testing only when SMD is completely disabled, otherwise the dependency on QCOM_SMD will make sure we can link against it. Fixes: e27ee2b16bad ("Bluetooth: btqcomsmd: Allow driver to build if COMPILE_TEST is enabled") Signed-off-by: Arnd Bergmann [bjorn: Restructure and clarify dependency to QCOM_WCNSS_CTRL] Signed-off-by: Bjorn Andersson Acked-by: Marcel Holtmann Signed-off-by: David S. 
Miller --- drivers/bluetooth/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index c2c14a12713b..08e054507d0b 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -344,7 +344,8 @@ config BT_WILINK config BT_QCOMSMD tristate "Qualcomm SMD based HCI support" - depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST + depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) + depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n) select BT_QCA help Qualcomm SMD based HCI driver. -- cgit v1.2.3 From c04ca616eed02b9abe7afd311382c3ed5eef5c40 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 22 Mar 2017 12:10:02 +0300 Subject: sfc: cleanup a condition in efx_udp_tunnel_del() Presumably if there is an "add" function, there is also a "del" function. But it causes a static checker warning because it looks like a common cut and paste bug. Signed-off-by: Dan Carpenter Acked-by: Jarod Wilson Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 334bcc6df6b2..50d28261b6b9 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2404,7 +2404,7 @@ static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *t tnl.type = (u16)efx_tunnel_type; tnl.port = ti->port; - if (efx->type->udp_tnl_add_port) + if (efx->type->udp_tnl_del_port) (void)efx->type->udp_tnl_del_port(efx, tnl); } -- cgit v1.2.3 From f43feef4e6acde10857fcbfdede790d6b3f2c71d Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 22 Mar 2017 17:25:27 -0500 Subject: amd-xgbe: Fix the ECC-related bit position definitions The ECC bit positions that describe whether the ECC interrupt is for Tx, Rx or descriptor memory and whether the it is a single correctable or double detected error were defined in incorrectly (reversed order). Fix the bit position definitions for these settings so that the proper ECC handling is performed. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 86f1626816ff..127adbeefb10 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -984,29 +984,29 @@ #define XP_ECC_CNT1_DESC_DED_WIDTH 8 #define XP_ECC_CNT1_DESC_SEC_INDEX 0 #define XP_ECC_CNT1_DESC_SEC_WIDTH 8 -#define XP_ECC_IER_DESC_DED_INDEX 0 +#define XP_ECC_IER_DESC_DED_INDEX 5 #define XP_ECC_IER_DESC_DED_WIDTH 1 -#define XP_ECC_IER_DESC_SEC_INDEX 1 +#define XP_ECC_IER_DESC_SEC_INDEX 4 #define XP_ECC_IER_DESC_SEC_WIDTH 1 -#define XP_ECC_IER_RX_DED_INDEX 2 +#define XP_ECC_IER_RX_DED_INDEX 3 #define XP_ECC_IER_RX_DED_WIDTH 1 -#define XP_ECC_IER_RX_SEC_INDEX 3 +#define XP_ECC_IER_RX_SEC_INDEX 2 #define XP_ECC_IER_RX_SEC_WIDTH 1 -#define XP_ECC_IER_TX_DED_INDEX 4 +#define XP_ECC_IER_TX_DED_INDEX 1 #define XP_ECC_IER_TX_DED_WIDTH 1 -#define XP_ECC_IER_TX_SEC_INDEX 5 +#define XP_ECC_IER_TX_SEC_INDEX 0 #define XP_ECC_IER_TX_SEC_WIDTH 1 -#define XP_ECC_ISR_DESC_DED_INDEX 0 +#define XP_ECC_ISR_DESC_DED_INDEX 5 #define XP_ECC_ISR_DESC_DED_WIDTH 1 -#define XP_ECC_ISR_DESC_SEC_INDEX 1 +#define XP_ECC_ISR_DESC_SEC_INDEX 4 #define XP_ECC_ISR_DESC_SEC_WIDTH 1 -#define XP_ECC_ISR_RX_DED_INDEX 2 +#define XP_ECC_ISR_RX_DED_INDEX 3 #define XP_ECC_ISR_RX_DED_WIDTH 1 -#define XP_ECC_ISR_RX_SEC_INDEX 3 +#define XP_ECC_ISR_RX_SEC_INDEX 2 #define XP_ECC_ISR_RX_SEC_WIDTH 1 -#define XP_ECC_ISR_TX_DED_INDEX 4 +#define XP_ECC_ISR_TX_DED_INDEX 1 #define XP_ECC_ISR_TX_DED_WIDTH 1 -#define XP_ECC_ISR_TX_SEC_INDEX 5 +#define XP_ECC_ISR_TX_SEC_INDEX 0 #define XP_ECC_ISR_TX_SEC_WIDTH 1 #define XP_I2C_MUTEX_BUSY_INDEX 31 #define XP_I2C_MUTEX_BUSY_WIDTH 1 -- cgit v1.2.3 From 68c386590375b2aea5a3154f17882a30170707bf Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Thu, 23 Mar 2017 02:20:39 +0300 Subject: net:ethernet:aquantia: Fix for RX checksum offload. Since AQC-100/107/108 chips supports hardware checksums for RX we should indicate this via NETIF_F_RXCSUM flag. v1->v2: 'Signed-off-by' tag added. Signed-off-by: Pavel Belous Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h | 1 + drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h index 1093ea18823a..0592a0330cf0 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h @@ -137,6 +137,7 @@ static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = { .tx_rings = HW_ATL_A0_TX_RINGS, .rx_rings = HW_ATL_A0_RX_RINGS, .hw_features = NETIF_F_HW_CSUM | + NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_TSO, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index 8bdee3ddd5a0..f3957e930340 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -188,6 +188,7 @@ static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = { .tx_rings = HW_ATL_B0_TX_RINGS, .rx_rings = HW_ATL_B0_RX_RINGS, .hw_features = NETIF_F_HW_CSUM | + NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_TSO | -- cgit v1.2.3 From e2ebfb2142acefecc2496e71360f50d25726040b Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Mon, 20 Mar 2017 19:50:29 +0200 Subject: mmc: sdhci: Do not disable interrupts while waiting for clock Disabling interrupts for even a millisecond can cause problems for some devices. That can happen when sdhci changes clock frequency because it waits for the clock to become stable under a spin lock. The spin lock is not necessary here. Anything that is racing with changes to the I/O state is already broken. The mmc core already provides synchronization via "claiming" the host. Although the spin lock probably should be removed from the code paths that lead to this point, such a patch would touch too much code to be suitable for stable trees. Consequently, for this patch, just drop the spin lock while waiting. Signed-off-by: Adrian Hunter Cc: stable@vger.kernel.org Signed-off-by: Ulf Hansson Tested-by: Ludovic Desroches --- drivers/mmc/host/sdhci.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 6fdd7a70f229..9c1a099afbbe 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1362,7 +1362,9 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk) return; } timeout--; - mdelay(1); + spin_unlock_irq(&host->lock); + usleep_range(900, 1100); + spin_lock_irq(&host->lock); } clk |= SDHCI_CLOCK_CARD_EN; -- cgit v1.2.3 From 027fb89e61054b4aedd962adb3e2003dec78a716 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Mon, 20 Mar 2017 19:50:30 +0200 Subject: mmc: sdhci-pci: Do not disable interrupts in sdhci_intel_set_power Disabling interrupts for even a millisecond can cause problems for some devices. That can happen when Intel host controllers wait for the present state to propagate. The spin lock is not necessary here. Anything that is racing with changes to the I/O state is already broken. The mmc core already provides synchronization via "claiming" the host. Although the spin lock probably should be removed from the code paths that lead to this point, such a patch would touch too much code to be suitable for stable trees. 
Consequently, for this patch, just drop the spin lock while waiting. Signed-off-by: Adrian Hunter Cc: stable@vger.kernel.org # v4.9+ Signed-off-by: Ulf Hansson Tested-by: Ludovic Desroches --- drivers/mmc/host/sdhci-pci-core.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 982b3e349426..86560d590786 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -451,6 +451,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, if (mode == MMC_POWER_OFF) return; + spin_unlock_irq(&host->lock); + /* * Bus power might not enable after D3 -> D0 transition due to the * present state not yet having propagated. Retry for up to 2ms. @@ -463,6 +465,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, reg |= SDHCI_POWER_ON; sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); } + + spin_lock_irq(&host->lock); } static const struct sdhci_ops sdhci_intel_byt_ops = { -- cgit v1.2.3 From 6e790555b6cae4da9c578376ff500861337fcfc1 Mon Sep 17 00:00:00 2001 From: Peter Robinson Date: Thu, 2 Mar 2017 17:46:36 +0000 Subject: pinctrl: ti: The IODelay driver is a DRA7xxx feature so depend on that SoC As the IODelay driver is a hardware feature of the DRA7xxx SoC depend on that SoC and compile test. Signed-off-by: Peter Robinson Acked-by: Tony Lindgren Signed-off-by: Linus Walleij --- drivers/pinctrl/ti/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/pinctrl/ti/Kconfig b/drivers/pinctrl/ti/Kconfig index 815a88673d38..542077069391 100644 --- a/drivers/pinctrl/ti/Kconfig +++ b/drivers/pinctrl/ti/Kconfig @@ -1,6 +1,6 @@ config PINCTRL_TI_IODELAY tristate "TI IODelay Module pinconf driver" - depends on OF + depends on OF && (SOC_DRA7XX || COMPILE_TEST) select GENERIC_PINCTRL_GROUPS select GENERIC_PINMUX_FUNCTIONS select GENERIC_PINCONF -- cgit v1.2.3 From 08a7f260c88f8d52be52ff2113119b3464b69874 Mon Sep 17 00:00:00 2001 From: Neil Armstrong Date: Tue, 7 Mar 2017 14:31:00 +0100 Subject: pinctrl: meson-gxbb: Fix typo in i2c ao groups Signed-off-by: Neil Armstrong Signed-off-by: Linus Walleij --- drivers/pinctrl/meson/pinctrl-meson-gxbb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c index 7671424d46cb..31a3a98d067c 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c @@ -667,11 +667,11 @@ static const char * const uart_ao_b_groups[] = { }; static const char * const i2c_ao_groups[] = { - "i2c_sdk_ao", "i2c_sda_ao", + "i2c_sck_ao", "i2c_sda_ao", }; static const char * const i2c_slave_ao_groups[] = { - "i2c_slave_sdk_ao", "i2c_slave_sda_ao", + "i2c_slave_sck_ao", "i2c_slave_sda_ao", }; static const char * const remote_input_ao_groups[] = { -- cgit v1.2.3 From 59f34e80c4910bc69ca643a2bb93fcce51366d83 Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Thu, 23 Feb 2017 11:12:40 +0100 Subject: pinctrl: samsung: Fix memory mapping code Some pinctrls share memory regions, and devm_ioremap_resource does not allow to share resources, in opposition to devm_ioremap. This patch restores back usage of devm_ioremap function, but with proper error handling and logging. 
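A minimal sketch of the difference being restored here (illustrative only, not part of this patch; pdev_a/pdev_b are hypothetical devices probing against the same register window). devm_ioremap_resource() claims the range with devm_request_mem_region() before mapping it, so a second controller sharing the same physical range fails, while plain devm_ioremap() only creates the mapping:

	void __iomem *a = devm_ioremap_resource(&pdev_a->dev, res);	/* OK */
	void __iomem *b = devm_ioremap_resource(&pdev_b->dev, res);	/* ERR_PTR(-EBUSY): range already claimed */
	void __iomem *c = devm_ioremap(&pdev_b->dev, res->start,
				       resource_size(res));		/* OK: maps without claiming */
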
Fixes: baafaca ("pinctrl: samsung: Fix return value check in samsung_pinctrl_get_soc_data()") Signed-off-by: Andrzej Hajda Tested-by: Marek Szyprowski Reviewed-by: Krzysztof Kozlowski Signed-off-by: Linus Walleij --- drivers/pinctrl/samsung/pinctrl-samsung.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index f9ddba7decc1..d7aa22cff480 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -988,9 +988,16 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, for (i = 0; i < ctrl->nr_ext_resources + 1; i++) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); - virt_base[i] = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(virt_base[i])) - return ERR_CAST(virt_base[i]); + if (!res) { + dev_err(&pdev->dev, "failed to get mem%d resource\n", i); + return ERR_PTR(-EINVAL); + } + virt_base[i] = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + if (!virt_base[i]) { + dev_err(&pdev->dev, "failed to ioremap %pR\n", res); + return ERR_PTR(-EIO); + } } bank = d->pin_banks; -- cgit v1.2.3 From a6566710adaa4a7dd5e0d99820ff9c9c30ee5951 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Tue, 14 Mar 2017 08:23:26 -0700 Subject: pinctrl: qcom: Don't clear status bit on irq_unmask Clearing the status bit on irq_unmask will discard any pending interrupt that did arrive after the irq_ack, i.e. while the IRQ handler function was executing. Fixes: f365be092572 ("pinctrl: Add Qualcomm TLMM driver") Cc: stable@vger.kernel.org Cc: Stephen Boyd Reported-by: Timur Tabi Signed-off-by: Bjorn Andersson Signed-off-by: Linus Walleij --- drivers/pinctrl/qcom/pinctrl-msm.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers') diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index c978be5eb9eb..273badd92561 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -609,10 +609,6 @@ static void msm_gpio_irq_unmask(struct irq_data *d) raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(pctrl->regs + g->intr_status_reg); - val &= ~BIT(g->intr_status_bit); - writel(val, pctrl->regs + g->intr_status_reg); - val = readl(pctrl->regs + g->intr_cfg_reg); val |= BIT(g->intr_enable_bit); writel(val, pctrl->regs + g->intr_cfg_reg); -- cgit v1.2.3 From e855fa9a65c40788b5069abb0d094537daa22e05 Mon Sep 17 00:00:00 2001 From: Patrice Chotard Date: Thu, 16 Mar 2017 18:26:02 +0100 Subject: pinctrl: st: add irq_request/release_resources callbacks When using GPIO as IRQ source, the GPIO must be configured in INPUT. Callbacks dedicated for this was missing in pinctrl-st driver. 
This fix the following kernel error when trying to lock a gpio as IRQ: [ 7.521095] gpio gpiochip7: (PIO11): gpiochip_lock_as_irq: tried to flag a GPIO set as output for IRQ [ 7.526018] gpio gpiochip7: (PIO11): unable to lock HW IRQ 6 for IRQ [ 7.529405] genirq: Failed to request resources for 0-0053 (irq 81) on irqchip GPIO Signed-off-by: Patrice Chotard Signed-off-by: Linus Walleij --- drivers/pinctrl/pinctrl-st.c | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index 676efcc032d2..3ae8066bc127 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c @@ -1285,6 +1285,22 @@ static void st_gpio_irq_unmask(struct irq_data *d) writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK); } +static int st_gpio_irq_request_resources(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + + st_gpio_direction_input(gc, d->hwirq); + + return gpiochip_lock_as_irq(gc, d->hwirq); +} + +static void st_gpio_irq_release_resources(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + + gpiochip_unlock_as_irq(gc, d->hwirq); +} + static int st_gpio_irq_set_type(struct irq_data *d, unsigned type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -1438,12 +1454,14 @@ static struct gpio_chip st_gpio_template = { }; static struct irq_chip st_gpio_irqchip = { - .name = "GPIO", - .irq_disable = st_gpio_irq_mask, - .irq_mask = st_gpio_irq_mask, - .irq_unmask = st_gpio_irq_unmask, - .irq_set_type = st_gpio_irq_set_type, - .flags = IRQCHIP_SKIP_SET_WAKE, + .name = "GPIO", + .irq_request_resources = st_gpio_irq_request_resources, + .irq_release_resources = st_gpio_irq_release_resources, + .irq_disable = st_gpio_irq_mask, + .irq_mask = st_gpio_irq_mask, + .irq_unmask = st_gpio_irq_unmask, + .irq_set_type = st_gpio_irq_set_type, + .flags = IRQCHIP_SKIP_SET_WAKE, }; static int st_gpiolib_register_bank(struct st_pinctrl *info, -- cgit v1.2.3 From d7402de48efae57bbb0072e53d3800c30de57ea5 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Thu, 16 Mar 2017 21:36:07 +0100 Subject: pinctrl: qcom: ipq4019: add missing pingroups for pins > 70 This patch adds the missing PINGROUP for GPIO70-99. This fixes a crash that happens in pinctrl-msm, if any of the GPIO70-99 are accessed. 
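For context, a rough sketch of why the missing entries crash (illustrative only, not taken from the patch): pinctrl-msm indexes the group table directly by pin number, so every pin advertised via ngpios needs an entry:

	/* 'offset' is the pin number handed in by gpiolib */
	const struct msm_pingroup *g = &pctrl->soc->groups[offset];	/* out-of-bounds before this patch for offset >= 70 */
	val = readl(pctrl->regs + g->ctl_reg);
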
Fixes: 5303f7827fcd41d ("pinctrl: qcom: ipq4019: set ngpios to correct value") Signed-off-by: Christian Lamparter Acked-by: Bjorn Andersson Signed-off-by: Linus Walleij --- drivers/pinctrl/qcom/pinctrl-ipq4019.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'drivers') diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c index b68ae424cee2..743d1f458205 100644 --- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c @@ -405,6 +405,36 @@ static const struct msm_pingroup ipq4019_groups[] = { PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), }; static const struct msm_pinctrl_soc_data ipq4019_pinctrl = { -- cgit v1.2.3 From 1c5bf78114d38c2761721108a2d925bccb496027 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 18 Mar 2017 18:25:05 +0100 Subject: EDAC: Select DEBUG_FS The debugfs.c functionality relies on DEBUG_FS so select it. 
Signed-off-by: Borislav Petkov --- drivers/edac/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index be3eac6ad54d..4773f2867234 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -43,6 +43,7 @@ config EDAC_LEGACY_SYSFS config EDAC_DEBUG bool "Debugging" + select DEBUG_FS help This turns on debugging information for the entire EDAC subsystem. You do so by inserting edac_module with "edac_debug_level=x." Valid -- cgit v1.2.3 From cd1be315ac9766fe83e4fb788d5d9fe032a9b877 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 18 Mar 2017 18:26:34 +0100 Subject: EDAC, pnd2_edac: Fix !EDAC_DEBUG build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Provide debugfs function stubs when EDAC_DEBUG is not enabled so that we don't fail the build: drivers/edac/pnd2_edac.c: In function ‘pnd2_init’: drivers/edac/pnd2_edac.c:1521:2: error: implicit declaration of function ‘setup_pnd2_debug’ [-Werror=implicit-function-declaration] setup_pnd2_debug(); ^ drivers/edac/pnd2_edac.c: In function ‘pnd2_exit’: drivers/edac/pnd2_edac.c:1529:2: error: implicit declaration of function ‘teardown_pnd2_debug’ [-Werror=implicit-function-declaration] teardown_pnd2_debug(); ^ Signed-off-by: Borislav Petkov --- drivers/edac/pnd2_edac.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index 14d39f05226e..ec2e349d728d 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -1435,7 +1435,11 @@ static void teardown_pnd2_debug(void) { debugfs_remove_recursive(pnd2_test); } -#endif +#else +static void setup_pnd2_debug(void) {} +static void teardown_pnd2_debug(void) {} +#endif /* CONFIG_EDAC_DEBUG */ + static int pnd2_probe(void) { -- cgit v1.2.3 From 1c2593cc8fd5960f8861de1be67135851f884836 Mon Sep 17 00:00:00 2001 From: Ankur Arora Date: Tue, 21 Mar 2017 15:43:37 -0700 Subject: xen/acpi: Replace hard coded "ACPI0007" Replace hard coded "ACPI0007" with ACPI_PROCESSOR_DEVICE_HID Reviewed-by: Konrad Rzeszutek Wilk Signed-off-by: Ankur Arora Signed-off-by: Boris Ostrovsky --- drivers/xen/xen-acpi-processor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c index 4ce10bcca18b..fac0d7b0edf7 100644 --- a/drivers/xen/xen-acpi-processor.c +++ b/drivers/xen/xen-acpi-processor.c @@ -408,7 +408,7 @@ static int check_acpi_ids(struct acpi_processor *pr_backup) acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, read_acpi_id, NULL, NULL, NULL); - acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL); + acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, read_acpi_id, NULL, NULL); upload: if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) { -- cgit v1.2.3 From 12ffed96d4369f086261ba2ee734fa8c932d7f55 Mon Sep 17 00:00:00 2001 From: Michel Dänzer Date: Thu, 23 Mar 2017 17:53:26 +0900 Subject: drm/fb-helper: Allow var->x/yres(_virtual) < fb->width/height again MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise this can also prevent modesets e.g. for switching VTs, when multiple monitors with different native resolutions are connected. The depths must match though, so keep the != test for that. Also update the DRM_DEBUG output to be slightly more accurate, this doesn't only affect requests from userspace. 
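Concretely, with hypothetical numbers for illustration only, given a 1920x1080, 32bpp fbdev buffer the relaxed check behaves as:

	/*   requested var            result
	 *   1280x1024 @ 32bpp       accepted  (fits within the fb)
	 *   1920x1080 @ 32bpp       accepted  (exact match, as before)
	 *   2560x1440 @ 32bpp       rejected  (exceeds fb->width/height)
	 *   1920x1080 @ 16bpp       rejected  (depth must still match)
	 */
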
Bugzilla: https://bugs.freedesktop.org/99841 Fixes: 865afb11949e ("drm/fb-helper: reject any changes to the fbdev") Signed-off-by: Michel Dänzer Reviewed-by: Daniel Stone Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170323085326.20185-1-michel@daenzer.net --- drivers/gpu/drm/drm_fb_helper.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index f6d4d9700734..324a688b3f30 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1260,9 +1260,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, * to KMS, hence fail if different settings are requested. */ if (var->bits_per_pixel != fb->format->cpp[0] * 8 || - var->xres != fb->width || var->yres != fb->height || - var->xres_virtual != fb->width || var->yres_virtual != fb->height) { - DRM_DEBUG("fb userspace requested width/height/bpp different than current fb " + var->xres > fb->width || var->yres > fb->height || + var->xres_virtual > fb->width || var->yres_virtual > fb->height) { + DRM_DEBUG("fb requested width/height/bpp can't fit in current fb " "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel, var->xres_virtual, var->yres_virtual, -- cgit v1.2.3 From 1914f0cd203c941bba72f9452c8290324f1ef3dc Mon Sep 17 00:00:00 2001 From: Ankur Arora Date: Tue, 21 Mar 2017 15:43:38 -0700 Subject: xen/acpi: upload PM state from init-domain to Xen This was broken in commit cd979883b9ed ("xen/acpi-processor: fix enabling interrupts on syscore_resume"). do_suspend (from xen/manage.c) and thus xen_resume_notifier never get called on the initial-domain at resume (it is if running as guest.) The rationale for the breaking change was that upload_pm_data() potentially does blocking work in syscore_resume(). This patch addresses the original issue by scheduling upload_pm_data() to execute in workqueue context. Cc: Stanislaw Gruszka Cc: stable@vger.kernel.org Based-on-patch-by: Konrad Wilk Reviewed-by: Konrad Rzeszutek Wilk Reviewed-by: Stanislaw Gruszka Signed-off-by: Ankur Arora Signed-off-by: Boris Ostrovsky --- drivers/xen/xen-acpi-processor.c | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c index fac0d7b0edf7..23e391d3ec01 100644 --- a/drivers/xen/xen-acpi-processor.c +++ b/drivers/xen/xen-acpi-processor.c @@ -27,10 +27,10 @@ #include #include #include +#include #include #include #include -#include #include #include @@ -466,15 +466,33 @@ static int xen_upload_processor_pm_data(void) return rc; } -static int xen_acpi_processor_resume(struct notifier_block *nb, - unsigned long action, void *data) +static void xen_acpi_processor_resume_worker(struct work_struct *dummy) { + int rc; + bitmap_zero(acpi_ids_done, nr_acpi_bits); - return xen_upload_processor_pm_data(); + + rc = xen_upload_processor_pm_data(); + if (rc != 0) + pr_info("ACPI data upload failed, error = %d\n", rc); +} + +static void xen_acpi_processor_resume(void) +{ + static DECLARE_WORK(wq, xen_acpi_processor_resume_worker); + + /* + * xen_upload_processor_pm_data() calls non-atomic code. + * However, the context for xen_acpi_processor_resume is syscore + * with only the boot CPU online and in an atomic context. + * + * So defer the upload for some point safer. 
+ */ + schedule_work(&wq); } -struct notifier_block xen_acpi_processor_resume_nb = { - .notifier_call = xen_acpi_processor_resume, +static struct syscore_ops xap_syscore_ops = { + .resume = xen_acpi_processor_resume, }; static int __init xen_acpi_processor_init(void) @@ -527,7 +545,7 @@ static int __init xen_acpi_processor_init(void) if (rc) goto err_unregister; - xen_resume_notifier_register(&xen_acpi_processor_resume_nb); + register_syscore_ops(&xap_syscore_ops); return 0; err_unregister: @@ -544,7 +562,7 @@ static void __exit xen_acpi_processor_exit(void) { int i; - xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb); + unregister_syscore_ops(&xap_syscore_ops); kfree(acpi_ids_done); kfree(acpi_id_present); kfree(acpi_id_cst_present); -- cgit v1.2.3 From a2125d02443e9a4e68bcfd9f8004fa23239e8329 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 23 Mar 2017 16:03:11 +0100 Subject: hwmon: (asus_atk0110) fix uninitialized data access The latest gcc-7 snapshot adds a warning to point out that when atk_read_value_old or atk_read_value_new fails, we copy uninitialized data into sensor->cached_value: drivers/hwmon/asus_atk0110.c: In function 'atk_input_show': drivers/hwmon/asus_atk0110.c:651:26: error: 'value' may be used uninitialized in this function [-Werror=maybe-uninitialized] Adding an error check avoids this. All versions of the driver are affected. Fixes: 2c03d07ad54d ("hwmon: Add Asus ATK0110 support") Signed-off-by: Arnd Bergmann Reviewed-by: Luca Tettamanti Signed-off-by: Guenter Roeck --- drivers/hwmon/asus_atk0110.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index cccef87963e0..975c43d446f8 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -646,6 +646,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value) else err = atk_read_value_new(sensor, value); + if (err) + return err; + sensor->is_valid = true; sensor->last_updated = jiffies; sensor->cached_value = *value; -- cgit v1.2.3 From 871a8623d3b40221ad1103aff715dfee0aa4dacf Mon Sep 17 00:00:00 2001 From: Shiraz Saleem Date: Fri, 17 Mar 2017 18:30:07 -0500 Subject: i40iw: Receive netdev events post INET_NOTIFIER state Netdev notification events are de-registered only when all client iwdev instances are removed. If a single client is closed and re-opened, netdev events could arrive even before the Control Queue-Pair (CQP) is created, causing a NULL pointer dereference crash in i40iw_get_cqp_request. Fix this by allowing netdev event notification only after we have reached the INET_NOTIFIER state with respect to device initialization. 
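The shape of the fix, reduced to a generic sketch (hypothetical names, not the i40iw code itself):

	static int foo_inetaddr_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
	{
		struct foo_device *fdev = container_of(nb, struct foo_device, inet_nb);

		/* notifications can arrive before the control QP exists; ignore them */
		if (fdev->init_state < FOO_INET_NOTIFIER)
			return NOTIFY_DONE;

		/* ... from here on it is safe to post control-queue requests ... */
		return NOTIFY_DONE;
	}
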
Reported-by: Stefan Assmann Signed-off-by: Shiraz Saleem Reviewed-by: Yuval Shaia Signed-off-by: Doug Ledford --- drivers/infiniband/hw/i40iw/i40iw_utils.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 0f5d43d1f5fc..70c3e9e79508 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -160,6 +160,9 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, return NOTIFY_DONE; iwdev = &hdl->device; + if (iwdev->init_state < INET_NOTIFIER) + return NOTIFY_DONE; + netdev = iwdev->ldev->netdev; upper_dev = netdev_master_upper_dev_get(netdev); if (netdev != event_netdev) @@ -214,6 +217,9 @@ int i40iw_inet6addr_event(struct notifier_block *notifier, return NOTIFY_DONE; iwdev = &hdl->device; + if (iwdev->init_state < INET_NOTIFIER) + return NOTIFY_DONE; + netdev = iwdev->ldev->netdev; if (netdev != event_netdev) return NOTIFY_DONE; @@ -260,6 +266,8 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void * if (!iwhdl) return NOTIFY_DONE; iwdev = &iwhdl->device; + if (iwdev->init_state < INET_NOTIFIER) + return NOTIFY_DONE; p = (__be32 *)neigh->primary_key; i40iw_copy_ip_ntohl(local_ipaddr, p); if (neigh->nud_state & NUD_VALID) { -- cgit v1.2.3 From 86f46aba8d1ac3ed0904542158a9b9cb9c7a143c Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 8 Mar 2017 22:00:52 +0200 Subject: IB/core: Protect against self-requeue of a cq work item We need to make sure that the cq work item does not run when we are destroying the cq. Unlike flush_work, cancel_work_sync protects against self-requeue of the work item (which we can do in ib_cq_poll_work). Signed-off-by: Sagi Grimberg Reviewed-by: Christoph Hellwig Reviewed-by: Bart Van Assche -- Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/cq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index e95510117a6d..2746d2eb3d52 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -196,7 +196,7 @@ void ib_free_cq(struct ib_cq *cq) irq_poll_disable(&cq->iop); break; case IB_POLL_WORKQUEUE: - flush_work(&cq->work); + cancel_work_sync(&cq->work); break; default: WARN_ON_ONCE(1); -- cgit v1.2.3 From cb8864559631754ac93d5734b165ccd0cad4728c Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Fri, 10 Mar 2017 11:34:20 -0700 Subject: infiniband: Fix alignment of mmap cookies to support VIPT caching When vmalloc_user is used to create memory that is supposed to be mmap'd to user space, it is necessary for the mmap cookie (eg the offset) to be aligned to SHMLBA. This creates a situation where all virtual mappings of the same physical page share the same virtual cache index and guarantees VIPT coherence. Otherwise the cache is non-coherent and the kernel will not see writes by userspace when reading the shared page (or vice-versa). 
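The rule itself, as a minimal sketch (the helper below is made up for illustration and is not the patch):

	#include <linux/kernel.h>	/* ALIGN()  */
	#include <asm/shmparam.h>	/* SHMLBA   */

	/* Hand out mmap cookies whose start and stride are SHMLBA-aligned, so
	 * every mapping of a given page lands on the same VIPT cache colour. */
	static unsigned long next_mmap_cookie(unsigned long *next, size_t size)
	{
		unsigned long off = ALIGN(*next ? *next : PAGE_SIZE, SHMLBA);

		*next = off + ALIGN(size, SHMLBA);
		return off;
	}
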
Reported-by: Josh Beavers Signed-off-by: Jason Gunthorpe Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rdmavt/mmap.c | 4 ++-- drivers/infiniband/sw/rxe/rxe_mmap.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c index e202b8142759..6b712eecbd37 100644 --- a/drivers/infiniband/sw/rdmavt/mmap.c +++ b/drivers/infiniband/sw/rdmavt/mmap.c @@ -170,9 +170,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, spin_lock_irq(&rdi->mmap_offset_lock); if (rdi->mmap_offset == 0) - rdi->mmap_offset = PAGE_SIZE; + rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA); ip->offset = rdi->mmap_offset; - rdi->mmap_offset += size; + rdi->mmap_offset += ALIGN(size, SHMLBA); spin_unlock_irq(&rdi->mmap_offset_lock); INIT_LIST_HEAD(&ip->pending_mmaps); diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c index c572a4c09359..bd812e00988e 100644 --- a/drivers/infiniband/sw/rxe/rxe_mmap.c +++ b/drivers/infiniband/sw/rxe/rxe_mmap.c @@ -156,10 +156,10 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, spin_lock_bh(&rxe->mmap_offset_lock); if (rxe->mmap_offset == 0) - rxe->mmap_offset = PAGE_SIZE; + rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA); ip->info.offset = rxe->mmap_offset; - rxe->mmap_offset += size; + rxe->mmap_offset += ALIGN(size, SHMLBA); spin_unlock_bh(&rxe->mmap_offset_lock); -- cgit v1.2.3 From 6332dee83d8eab80d6d502cc51135b998fe6df79 Mon Sep 17 00:00:00 2001 From: Adit Ranadive Date: Wed, 22 Feb 2017 17:22:56 -0800 Subject: RDMA/vmw_pvrdma: Cleanup unused variables Removed the unused nreq and redundant index variables. Moved hardcoded async and cq ring pages number to macro. Reported-by: Yuval Shaia Signed-off-by: Adit Ranadive Reviewed-by: Aditya Sarwade Tested-by: Andrew Boyer Signed-off-by: Doug Ledford --- drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | 2 ++ drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 4 ++-- drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 33 ++++++++++---------------- 3 files changed, 17 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 3cd96c1b9502..dbf61c37835c 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h @@ -69,6 +69,8 @@ */ #define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820 +#define PVRDMA_NUM_RING_PAGES 4 + struct pvrdma_dev; struct pvrdma_page_dir { diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index 100bea5c42ff..e77476ca70ac 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -858,7 +858,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, dev->dsr->resp_slot_dma = (u64)slot_dma; /* Async event ring */ - dev->dsr->async_ring_pages.num_pages = 4; + dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES; ret = pvrdma_page_dir_init(dev, &dev->async_pdir, dev->dsr->async_ring_pages.num_pages, true); if (ret) @@ -867,7 +867,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma; /* CQ notification ring */ - dev->dsr->cq_ring_pages.num_pages = 4; + dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES; ret = pvrdma_page_dir_init(dev, &dev->cq_pdir, dev->dsr->cq_ring_pages.num_pages, true); if (ret) diff --git 
a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index dbbfd35e7da7..3ffbb2d42170 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -554,13 +554,13 @@ out: return ret; } -static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n) +static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n) { return pvrdma_page_dir_get_ptr(&qp->pdir, qp->sq.offset + n * qp->sq.wqe_size); } -static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n) +static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n) { return pvrdma_page_dir_get_ptr(&qp->pdir, qp->rq.offset + n * qp->rq.wqe_size); @@ -598,9 +598,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, unsigned long flags; struct pvrdma_sq_wqe_hdr *wqe_hdr; struct pvrdma_sge *sge; - int i, index; - int nreq; - int ret; + int i, ret; /* * In states lower than RTS, we can fail immediately. In other states, @@ -613,9 +611,8 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, spin_lock_irqsave(&qp->sq.lock, flags); - index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt); - for (nreq = 0; wr; nreq++, wr = wr->next) { - unsigned int tail; + while (wr) { + unsigned int tail = 0; if (unlikely(!pvrdma_idx_ring_has_space( qp->sq.ring, qp->sq.wqe_cnt, &tail))) { @@ -680,7 +677,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, } } - wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index); + wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail); memset(wqe_hdr, 0, sizeof(*wqe_hdr)); wqe_hdr->wr_id = wr->wr_id; wqe_hdr->num_sge = wr->num_sge; @@ -771,12 +768,11 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, /* Make sure wqe is written before index update */ smp_wmb(); - index++; - if (unlikely(index >= qp->sq.wqe_cnt)) - index = 0; /* Update shared sq ring */ pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt); + + wr = wr->next; } ret = 0; @@ -806,7 +802,6 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct pvrdma_qp *qp = to_vqp(ibqp); struct pvrdma_rq_wqe_hdr *wqe_hdr; struct pvrdma_sge *sge; - int index, nreq; int ret = 0; int i; @@ -821,9 +816,8 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, spin_lock_irqsave(&qp->rq.lock, flags); - index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt); - for (nreq = 0; wr; nreq++, wr = wr->next) { - unsigned int tail; + while (wr) { + unsigned int tail = 0; if (unlikely(wr->num_sge > qp->rq.max_sg || wr->num_sge < 0)) { @@ -843,7 +837,7 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, goto out; } - wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index); + wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail); wqe_hdr->wr_id = wr->wr_id; wqe_hdr->num_sge = wr->num_sge; wqe_hdr->total_len = 0; @@ -859,12 +853,11 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, /* Make sure wqe is written before index update */ smp_wmb(); - index++; - if (unlikely(index >= qp->rq.wqe_cnt)) - index = 0; /* Update shared rq ring */ pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt); + + wr = wr->next; } spin_unlock_irqrestore(&qp->rq.lock, flags); -- cgit v1.2.3 From e51c2fb0331cb3440d7dc83ee78019ee8c7bb366 Mon Sep 17 00:00:00 2001 From: Adit Ranadive Date: Wed, 22 Feb 2017 17:22:57 -0800 Subject: RDMA/vmw_pvrdma: Dont hardcode QP header page Moved the header page count to a macro. 
Reported-by: Yuval Shaia Signed-off-by: Adit Ranadive Reviewed-by: Aditya Sarwade Tested-by: Andrew Boyer Signed-off-by: Doug Ledford --- drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | 1 + drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index dbf61c37835c..9fbe22d3467b 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h @@ -70,6 +70,7 @@ #define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820 #define PVRDMA_NUM_RING_PAGES 4 +#define PVRDMA_QP_NUM_HEADER_PAGES 1 struct pvrdma_dev; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index 3ffbb2d42170..30062aad3af1 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -170,8 +170,9 @@ static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap, sizeof(struct pvrdma_sge) * qp->sq.max_sg); /* Note: one extra page for the header. */ - qp->npages_send = 1 + (qp->sq.wqe_cnt * qp->sq.wqe_size + - PAGE_SIZE - 1) / PAGE_SIZE; + qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES + + (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) / + PAGE_SIZE; return 0; } @@ -288,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, qp->npages = qp->npages_send + qp->npages_recv; /* Skip header page. */ - qp->sq.offset = PAGE_SIZE; + qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE; /* Recv queue pages are after send pages. */ qp->rq.offset = qp->npages_send * PAGE_SIZE; @@ -341,7 +342,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type); cmd->access_flags = IB_ACCESS_LOCAL_WRITE; cmd->total_chunks = qp->npages; - cmd->send_chunks = qp->npages_send - 1; + cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES; cmd->pdir_dma = qp->pdir.dir_dma; dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n", -- cgit v1.2.3 From b172679b0d3b93a6197b2ff892794e7178e3c448 Mon Sep 17 00:00:00 2001 From: Aditya Sarwade Date: Wed, 22 Feb 2017 17:22:58 -0800 Subject: RDMA/vmw_pvrdma: Activate device on ethernet link up Restore device state when ethernet link changes to active. Acked-by: George Zhang Acked-by: Jorgen Hansen Acked-by: Bryan Tan Signed-off-by: Aditya Sarwade Signed-off-by: Adit Ranadive Signed-off-by: Doug Ledford --- drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h | 2 +- drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 13 +++++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h index e69d6f3cae32..09078ccfaec7 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h @@ -132,7 +132,7 @@ enum pvrdma_pci_resource { enum pvrdma_device_ctl { PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */ - PVRDMA_DEVICE_CTL_QUIESCE, /* Quiesce device. */ + PVRDMA_DEVICE_CTL_UNQUIESCE, /* Unquiesce device. */ PVRDMA_DEVICE_CTL_RESET, /* Reset device. 
*/ }; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index e77476ca70ac..34ebc7615411 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -56,7 +56,7 @@ #include "pvrdma.h" #define DRV_NAME "vmw_pvrdma" -#define DRV_VERSION "1.0.0.0-k" +#define DRV_VERSION "1.0.1.0-k" static DEFINE_MUTEX(pvrdma_device_list_lock); static LIST_HEAD(pvrdma_device_list); @@ -660,7 +660,16 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev, pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); break; case NETDEV_UP: - pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); + pvrdma_write_reg(dev, PVRDMA_REG_CTL, + PVRDMA_DEVICE_CTL_UNQUIESCE); + + mb(); + + if (pvrdma_read_reg(dev, PVRDMA_REG_ERR)) + dev_err(&dev->pdev->dev, + "failed to activate device during link up\n"); + else + pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); break; default: dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n", -- cgit v1.2.3 From ded260235308f340b979258a4c736e06ba12c747 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 8 Mar 2017 08:21:52 +0300 Subject: IB/rxe: double free on error "goto err;" has it's own kfree_skb() call so it's a double free. We only need to free on the "goto exit;" path. Fixes: 8700e3e7c485 ("Soft RoCE driver") Signed-off-by: Dan Carpenter Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rxe/rxe_req.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index dbfde0dc6ff7..9f95f50b2909 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -729,11 +729,11 @@ next_wqe: ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb); if (ret) { qp->need_req_skb = 1; - kfree_skb(skb); rollback_state(wqe, qp, &rollback_wqe, rollback_psn); if (ret == -EAGAIN) { + kfree_skb(skb); rxe_run_task(&qp->req.task, 1); goto exit; } -- cgit v1.2.3 From 004d18ea99b0f3b7b3d5790e4dc5ca7d5238706d Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 23 Feb 2017 13:40:16 +0300 Subject: RDMA/ocrdma: fix a type issue in ocrdma_put_pd_num() We want to return zero on success or negative error codes. The type should be int and not u8. Signed-off-by: Dan Carpenter Reviewed-by: Yuval Shaia Signed-off-by: Doug Ledford --- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index bc9fb144e57b..c52edeafd616 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -372,7 +372,7 @@ static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id, return 0; } -static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id, +static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id, bool dpp_pool) { int status; -- cgit v1.2.3 From a1c5dd13228a1f9e5087375f9702422dfc2adbf1 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 28 Feb 2017 21:42:53 +0200 Subject: IB/rxe: Update documentation link All Soft-RoCE (rxe) is handled now in rdma-core user space library, so the documentation. The patch below updates the documentation link to that new location. 
Reported-by: Josh Beavers Signed-off-by: Leon Romanovsky Reviewed-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rxe/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig index 7d1ac27ed251..6332dedc11e8 100644 --- a/drivers/infiniband/sw/rxe/Kconfig +++ b/drivers/infiniband/sw/rxe/Kconfig @@ -22,4 +22,4 @@ config RDMA_RXE To configure and work with soft-RoCE driver please use the following wiki page under "configure Soft-RoCE (RXE)" section: - https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home + https://github.com/linux-rdma/rdma-core/blob/master/Documentation/rxe.md -- cgit v1.2.3 From 0957c29f78af7d890c4ac506eda8f76bfc5a137a Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 7 Mar 2017 22:56:53 +0000 Subject: IB/core: Restore I/O MMU, s390 and powerpc support Avoid that the following error message is reported on the console while loading an RDMA driver with I/O MMU support enabled: DMAR: Allocating domain for mlx5_0 failed Ensure that DMA mapping operations that use to_pci_dev() to access to struct pci_dev see the correct PCI device. E.g. the s390 and powerpc DMA mapping operations use to_pci_dev() even with I/O MMU support disabled. This patch preserves the following changes of the DMA mapping updates patch series: - Introduction of dma_virt_ops. - Removal of ib_device.dma_ops. - Removal of struct ib_dma_mapping_ops. - Removal of an if-statement from each ib_dma_*() operation. - IB HW drivers no longer set dma_device directly. Reported-by: Sebastian Ott Reported-by: Parav Pandit Fixes: commit 99db9494035f ("IB/core: Remove ib_device.dma_device") Signed-off-by: Bart Van Assche Reviewed-by: parav@mellanox.com Tested-by: parav@mellanox.com Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/device.c | 26 ++++++++++++++++++++------ include/rdma/ib_verbs.h | 30 +++++++++++++++++------------- 2 files changed, 37 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 593d2ce6ec7c..addf869045cc 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -336,12 +336,26 @@ int ib_register_device(struct ib_device *device, struct device *parent = device->dev.parent; WARN_ON_ONCE(!parent); - if (!device->dev.dma_ops) - device->dev.dma_ops = parent->dma_ops; - if (!device->dev.dma_mask) - device->dev.dma_mask = parent->dma_mask; - if (!device->dev.coherent_dma_mask) - device->dev.coherent_dma_mask = parent->coherent_dma_mask; + WARN_ON_ONCE(device->dma_device); + if (device->dev.dma_ops) { + /* + * The caller provided custom DMA operations. Copy the + * DMA-related fields that are used by e.g. dma_alloc_coherent() + * into device->dev. + */ + device->dma_device = &device->dev; + if (!device->dev.dma_mask) + device->dev.dma_mask = parent->dma_mask; + if (!device->dev.coherent_dma_mask) + device->dev.coherent_dma_mask = + parent->coherent_dma_mask; + } else { + /* + * The caller did not provide custom DMA operations. Use the + * DMA mapping operations of the parent device. 
+ */ + device->dma_device = parent; + } mutex_lock(&device_mutex); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 0f1813c13687..99e4423eb2b8 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1863,6 +1863,9 @@ struct ib_port_immutable { }; struct ib_device { + /* Do not access @dma_device directly from ULP nor from HW drivers. */ + struct device *dma_device; + char name[IB_DEVICE_NAME_MAX]; struct list_head event_handler_list; @@ -3007,7 +3010,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) */ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) { - return dma_mapping_error(&dev->dev, dma_addr); + return dma_mapping_error(dev->dma_device, dma_addr); } /** @@ -3021,7 +3024,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) { - return dma_map_single(&dev->dev, cpu_addr, size, direction); + return dma_map_single(dev->dma_device, cpu_addr, size, direction); } /** @@ -3035,7 +3038,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - dma_unmap_single(&dev->dev, addr, size, direction); + dma_unmap_single(dev->dma_device, addr, size, direction); } /** @@ -3052,7 +3055,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev, size_t size, enum dma_data_direction direction) { - return dma_map_page(&dev->dev, page, offset, size, direction); + return dma_map_page(dev->dma_device, page, offset, size, direction); } /** @@ -3066,7 +3069,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - dma_unmap_page(&dev->dev, addr, size, direction); + dma_unmap_page(dev->dma_device, addr, size, direction); } /** @@ -3080,7 +3083,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - return dma_map_sg(&dev->dev, sg, nents, direction); + return dma_map_sg(dev->dma_device, sg, nents, direction); } /** @@ -3094,7 +3097,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - dma_unmap_sg(&dev->dev, sg, nents, direction); + dma_unmap_sg(dev->dma_device, sg, nents, direction); } static inline int ib_dma_map_sg_attrs(struct ib_device *dev, @@ -3102,7 +3105,8 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev, enum dma_data_direction direction, unsigned long dma_attrs) { - return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); + return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, + dma_attrs); } static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, @@ -3110,7 +3114,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, enum dma_data_direction direction, unsigned long dma_attrs) { - dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); + dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs); } /** * ib_sg_dma_address - Return the DMA address from a scatter/gather entry @@ -3152,7 +3156,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - dma_sync_single_for_cpu(&dev->dev, addr, size, dir); + dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); } /** @@ -3167,7 +3171,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - 
dma_sync_single_for_device(&dev->dev, addr, size, dir); + dma_sync_single_for_device(dev->dma_device, addr, size, dir); } /** @@ -3182,7 +3186,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev, dma_addr_t *dma_handle, gfp_t flag) { - return dma_alloc_coherent(&dev->dev, size, dma_handle, flag); + return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag); } /** @@ -3196,7 +3200,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { - dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle); + dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); } /** -- cgit v1.2.3 From 9fcd67d1772c43d2f23e8fca56acc7219e991676 Mon Sep 17 00:00:00 2001 From: David Marchand Date: Fri, 24 Feb 2017 15:38:26 +0100 Subject: IB/rxe: increment msn only when completing a request According to C9-147, MSN should only be incremented when the last packet of a multi packet request has been received. "Logically, the requester associates a sequential Send Sequence Number (SSN) with each WQE posted to the send queue. The SSN bears a one- to-one relationship to the MSN returned by the responder in each re- sponse packet. Therefore, when the requester receives a response, it in- terprets the MSN as representing the SSN of the most recent request completed by the responder to determine which send WQE(s) can be completed." Fixes: 8700e3e7c485 ("Soft RoCE driver") Signed-off-by: David Marchand Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rxe/rxe_resp.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index d404a8aba7af..c9dd385ce62e 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -813,18 +813,17 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) WARN_ON_ONCE(1); } - /* We successfully processed this new request. */ - qp->resp.msn++; - /* next expected psn, read handles this separately */ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; qp->resp.opcode = pkt->opcode; qp->resp.status = IB_WC_SUCCESS; - if (pkt->mask & RXE_COMP_MASK) + if (pkt->mask & RXE_COMP_MASK) { + /* We successfully processed this new request. */ + qp->resp.msn++; return RESPST_COMPLETE; - else if (qp_type(qp) == IB_QPT_RC) + } else if (qp_type(qp) == IB_QPT_RC) return RESPST_ACKNOWLEDGE; else return RESPST_CLEANUP; -- cgit v1.2.3 From fedd9e1f75828992ace5833379116f38c66ad7bb Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 16 Mar 2017 18:57:00 +0200 Subject: IB/cq: Don't process more than the given budget The caller might not want this overhead. Reviewed-by: Bart Van Assche Reviewed-by: Leon Romanovsky Signed-off-by: Sagi Grimberg Reviewed-by: Yuval Shaia Signed-off-by: Doug Ledford --- drivers/infiniband/core/cq.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index 2746d2eb3d52..f2ae75fa3128 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -29,7 +29,13 @@ static int __ib_process_cq(struct ib_cq *cq, int budget) { int i, n, completed = 0; - while ((n = ib_poll_cq(cq, IB_POLL_BATCH, cq->wc)) > 0) { + /* + * budget might be (-1) if the caller does not + * want to bound this call, thus we need unsigned + * minimum here. 
+ */ + while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH, + budget - completed), cq->wc)) > 0) { for (i = 0; i < n; i++) { struct ib_wc *wc = &cq->wc[i]; -- cgit v1.2.3 From b7363e67b23e04c23c2a99437feefac7292a88bc Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 8 Mar 2017 22:03:17 +0200 Subject: IB/device: Convert ib-comp-wq to be CPU-bound This workqueue is used by our storage target mode ULPs via the new CQ API. Recent observations when working with very high-end flash storage devices reveal that UNBOUND workqueue threads can migrate between cpu cores and even numa nodes (although some numa locality is accounted for). While this attribute can be useful in some workloads, it does not fit in very nicely with the normal run-to-completion model we usually use in our target-mode ULPs and the block-mq irq<->cpu affinity facilities. The whole block-mq concept is that the completion will land on the same cpu where the submission was performed. The fact that our submitter thread is migrating cpus can break this locality. We assume that as a target mode ULP, we will serve multiple initiators/clients and we can spread the load enough without having to use unbound kworkers. Also, while we're at it, expose this workqueue via sysfs which is harmless and can be useful for debug. Signed-off-by: Sagi Grimberg Reviewed-by: Bart Van Assche -- Signed-off-by: Doug Ledford --- drivers/infiniband/core/device.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index addf869045cc..7c9e34d679d3 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1029,8 +1029,7 @@ static int __init ib_core_init(void) return -ENOMEM; ib_comp_wq = alloc_workqueue("ib-comp-wq", - WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, - WQ_UNBOUND_MAX_ACTIVE); + WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0); if (!ib_comp_wq) { ret = -ENOMEM; goto err; -- cgit v1.2.3 From ea174c9573b0e0c8bc1a7a90fe9360ccb7aa9cbb Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 27 Feb 2017 20:16:33 +0200 Subject: RDMA/iser: Fix possible mr leak on device removal event When the rdma device is removed, we must cleanup all the rdma resources within the DEVICE_REMOVAL event handler to let the device teardown gracefully. When this happens with live I/O, some memory regions are occupied. Thus, track them too and dereg all the mr's. We are safe with mr access by iscsi_iser_cleanup_task. 
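The underlying pattern is more general than iSER: descriptors leave the free list while their memory region is in flight, so a teardown that only walks the free list misses the busy ones. A simplified sketch of the two-list arrangement, with hypothetical names (the actual change, which adds an all_list next to the existing free list, is in the diff below):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct desc {
        struct list_head list;          /* free-list linkage, removed while the MR is in use */
        struct list_head all_list;      /* lifetime linkage, removed only at teardown */
        /* registration resources, PI context, ... */
};

struct pool {
        struct list_head free_list;     /* descriptors currently available for I/O */
        struct list_head all_list;      /* every descriptor ever allocated */
        spinlock_t lock;
};

/* Teardown walks all_list, so descriptors still checked out for live I/O are freed too. */
static void pool_destroy(struct pool *pool)
{
        struct desc *desc, *tmp;

        list_for_each_entry_safe(desc, tmp, &pool->all_list, all_list) {
                list_del(&desc->all_list);
                /* deregister the MR and release its resources here */
                kfree(desc);
        }
}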
Reported-by: Raju Rangoju Signed-off-by: Sagi Grimberg Reviewed-by: Max Gurtovoy Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iscsi_iser.h | 2 ++ drivers/infiniband/ulp/iser/iser_verbs.c | 8 +++++--- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 9d0b22ad58c1..c1ae4aeae2f9 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -430,6 +430,7 @@ struct iser_fr_desc { struct list_head list; struct iser_reg_resources rsc; struct iser_pi_context *pi_ctx; + struct list_head all_list; }; /** @@ -443,6 +444,7 @@ struct iser_fr_pool { struct list_head list; spinlock_t lock; int size; + struct list_head all_list; }; /** diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 30b622f2ab73..c538a38c91ce 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -362,6 +362,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, int i, ret; INIT_LIST_HEAD(&fr_pool->list); + INIT_LIST_HEAD(&fr_pool->all_list); spin_lock_init(&fr_pool->lock); fr_pool->size = 0; for (i = 0; i < cmds_max; i++) { @@ -373,6 +374,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, } list_add_tail(&desc->list, &fr_pool->list); + list_add_tail(&desc->all_list, &fr_pool->all_list); fr_pool->size++; } @@ -392,13 +394,13 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn) struct iser_fr_desc *desc, *tmp; int i = 0; - if (list_empty(&fr_pool->list)) + if (list_empty(&fr_pool->all_list)) return; iser_info("freeing conn %p fr pool\n", ib_conn); - list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) { - list_del(&desc->list); + list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) { + list_del(&desc->all_list); iser_free_reg_res(&desc->rsc); if (desc->pi_ctx) iser_free_pi_ctx(desc->pi_ctx); -- cgit v1.2.3 From f6aafac184a3e46e919769dd4faa8bf0dc436534 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 14 Mar 2017 13:18:45 +0100 Subject: IB/qib: fix false-postive maybe-uninitialized warning aarch64-linux-gcc-7 complains about code it doesn't fully understand: drivers/infiniband/hw/qib/qib_iba7322.c: In function 'qib_7322_txchk_change': include/asm-generic/bitops/non-atomic.h:105:35: error: 'shadow' may be used uninitialized in this function [-Werror=maybe-uninitialized] The code is right, and despite trying hard, I could not come up with a version that I liked better than just adding a fake initialization here to shut up the warning.
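The shape that trips the compiler reduces to a conditional write paired with a conditionally guarded read, where the optimizer cannot always prove the two conditions match. A minimal userspace sketch of that shape, not the qib code itself (want_shadow stands in for the driver's condition; build with gcc -O2 -Wmaybe-uninitialized to see whether your compiler flags it):

#include <stdio.h>

static void report(int want_shadow)
{
        unsigned long shadow;   /* some gcc versions warn this may be used uninitialized */
        int have_shadow = 0;

        if (want_shadow) {
                shadow = 0xdead;        /* the driver reads this from a hardware shadow register */
                have_shadow = 1;
        }

        /* reached only when shadow was written above, but gcc cannot always see that */
        if (have_shadow)
                printf("shadow = %#lx\n", shadow);
}

int main(void)
{
        report(1);
        return 0;
}

Initializing the variable to 0, as the one-line change below does, keeps the behaviour identical and silences the false positive.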
Fixes: f931551bafe1 ("IB/qib: Add new qib driver for QLogic PCIe InfiniBand adapters") Signed-off-by: Arnd Bergmann Acked-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qib/qib_iba7322.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 12c4208fd701..af9f596bb68b 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -7068,7 +7068,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start, unsigned long flags; while (wait) { - unsigned long shadow; + unsigned long shadow = 0; int cstart, previ = -1; /* -- cgit v1.2.3 From 819f60fb7db169d851186d04e571e9bca27321e8 Mon Sep 17 00:00:00 2001 From: Qiuxu Zhuo Date: Sat, 25 Mar 2017 19:29:01 +0800 Subject: EDAC, pnd2_edac: Fix reported DIMM number DIMM number passed to edac_mc_handle_error() was accidentally hardcoded to zero. Pass in the correct daddr->dimm value. Signed-off-by: Qiuxu Zhuo Signed-off-by: Borislav Petkov --- drivers/edac/pnd2_edac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index ec2e349d728d..928e0dba41fc 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -1164,7 +1164,7 @@ static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m, /* Call the helper to output message */ edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT, - m->addr & ~PAGE_MASK, 0, daddr->chan, 0, -1, optype, msg); + m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg); return; -- cgit v1.2.3 From de85ec8b07f82c8c84de7687f769e74bf4c26a1e Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 23 Mar 2017 13:07:16 +0800 Subject: virtio_pci: fix out of bound access for msix_names Fedora has received multiple reports of crashes when running 4.11 as a guest https://bugzilla.redhat.com/show_bug.cgi?id=1430297 https://bugzilla.redhat.com/show_bug.cgi?id=1434462 https://bugzilla.kernel.org/show_bug.cgi?id=194911 https://bugzilla.redhat.com/show_bug.cgi?id=1433899 The crashes are not always consistent but they are generally some flavor of oops or GPF in virtio related code. Multiple people have done bisections (Thank you Thorsten Leemhuis and Richard W.M. Jones) and found this commit to be at fault 07ec51480b5eb1233f8c1b0f5d7a7c8d1247c507 is the first bad commit commit 07ec51480b5eb1233f8c1b0f5d7a7c8d1247c507 Author: Christoph Hellwig Date: Sun Feb 5 18:15:19 2017 +0100 virtio_pci: use shared interrupts for virtqueues The issue seems to be an out of bounds access to the msix_names array corrupting kernel memory. Fixes: 07ec51480b5e ("virtio_pci: use shared interrupts for virtqueues") Reported-by: Laura Abbott Signed-off-by: Jason Wang Signed-off-by: Michael S. Tsirkin Reviewed-by: Christoph Hellwig Tested-by: Richard W.M. 
Jones Tested-by: Thorsten Leemhuis --- drivers/virtio/virtio_pci_common.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index df548a6fb844..590534910dc6 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -147,7 +147,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, { struct virtio_pci_device *vp_dev = to_vp_device(vdev); const char *name = dev_name(&vp_dev->vdev.dev); - int i, err = -ENOMEM, allocated_vectors, nvectors; + int i, j, err = -ENOMEM, allocated_vectors, nvectors; unsigned flags = PCI_IRQ_MSIX; bool shared = false; u16 msix_vec; @@ -212,7 +212,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, if (!vp_dev->msix_vector_map) goto out_disable_config_irq; - allocated_vectors = 1; /* vector 0 is the config interrupt */ + allocated_vectors = j = 1; /* vector 0 is the config interrupt */ for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; @@ -236,18 +236,19 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, continue; } - snprintf(vp_dev->msix_names[i + 1], + snprintf(vp_dev->msix_names[j], sizeof(*vp_dev->msix_names), "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]); err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), vring_interrupt, IRQF_SHARED, - vp_dev->msix_names[i + 1], vqs[i]); + vp_dev->msix_names[j], vqs[i]); if (err) { /* don't free this irq on error */ vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; goto out_remove_vqs; } vp_dev->msix_vector_map[i] = msix_vec; + j++; /* * Use a different vector for each queue if they are available, -- cgit v1.2.3 From fc8653228c8588a120f6b5dad6983b7b61ff669e Mon Sep 17 00:00:00 2001 From: Ladi Prosek Date: Thu, 23 Mar 2017 08:04:18 +0100 Subject: virtio_balloon: init 1st buffer in stats vq When init_vqs runs, virtio_balloon.stats is either uninitialized or contains stale values. The host updates its state with garbage data because it has no way of knowing that this is just a marker buffer used for signaling. This patch updates the stats before pushing the initial buffer. Alternative fixes: * Push an empty buffer in init_vqs. Not easily done with the current virtio implementation and violates the spec "Driver MUST supply the same subset of statistics in all buffers submitted to the statsq". * Push a buffer with invalid tags in init_vqs. Violates the same spec clause, plus "invalid tag" is not really defined. Note: the spec says: When using the legacy interface, the device SHOULD ignore all values in the first buffer in the statsq supplied by the driver after device initialization. Note: Historically, drivers supplied an uninitialized buffer in the first buffer. Unfortunately QEMU does not seem to implement the recommendation even for the legacy interface. Cc: stable@vger.kernel.org Signed-off-by: Ladi Prosek Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_balloon.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 4e1191508228..fcd06e16a1a2 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -429,6 +429,8 @@ static int init_vqs(struct virtio_balloon *vb) * Prime this virtqueue with one buffer so the hypervisor can * use it to signal us later (it can't be broken yet!). 
*/ + update_balloon_stats(vb); + sg_init_one(&sg, vb->stats, sizeof vb->stats); if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) < 0) -- cgit v1.2.3 From 9646b26e85896ef0256e66649f7937f774dc18a6 Mon Sep 17 00:00:00 2001 From: Ladi Prosek Date: Tue, 28 Mar 2017 18:46:58 +0200 Subject: virtio-balloon: use actual number of stats for stats queue buffers The virtio balloon driver contained a not-so-obvious invariant that update_balloon_stats has to update exactly VIRTIO_BALLOON_S_NR counters in order to send valid stats to the host. This commit fixes it by having update_balloon_stats return the actual number of counters, and its callers use it when pushing buffers to the stats virtqueue. Note that it is still out of spec to change the number of counters at run-time. "Driver MUST supply the same subset of statistics in all buffers submitted to the statsq." Suggested-by: Arnd Bergmann Signed-off-by: Ladi Prosek Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_balloon.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index fcd06e16a1a2..d9544db231ef 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -242,11 +242,11 @@ static inline void update_stat(struct virtio_balloon *vb, int idx, #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT) -static void update_balloon_stats(struct virtio_balloon *vb) +static unsigned int update_balloon_stats(struct virtio_balloon *vb) { unsigned long events[NR_VM_EVENT_ITEMS]; struct sysinfo i; - int idx = 0; + unsigned int idx = 0; long available; all_vm_events(events); @@ -266,6 +266,8 @@ static void update_balloon_stats(struct virtio_balloon *vb) pages_to_bytes(i.totalram)); update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL, pages_to_bytes(available)); + + return idx; } /* @@ -291,14 +293,14 @@ static void stats_handle_request(struct virtio_balloon *vb) { struct virtqueue *vq; struct scatterlist sg; - unsigned int len; + unsigned int len, num_stats; - update_balloon_stats(vb); + num_stats = update_balloon_stats(vb); vq = vb->stats_vq; if (!virtqueue_get_buf(vq, &len)) return; - sg_init_one(&sg, vb->stats, sizeof(vb->stats)); + sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats); virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); virtqueue_kick(vq); } @@ -423,15 +425,16 @@ static int init_vqs(struct virtio_balloon *vb) vb->deflate_vq = vqs[1]; if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { struct scatterlist sg; + unsigned int num_stats; vb->stats_vq = vqs[2]; /* * Prime this virtqueue with one buffer so the hypervisor can * use it to signal us later (it can't be broken yet!). 
*/ - update_balloon_stats(vb); + num_stats = update_balloon_stats(vb); - sg_init_one(&sg, vb->stats, sizeof vb->stats); + sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats); if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) < 0) BUG(); -- cgit v1.2.3 From f0bb2d50dfcc519f06f901aac88502be6ff1df2c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 28 Mar 2017 18:46:59 +0200 Subject: virtio_balloon: prevent uninitialized variable use The latest gcc-7.0.1 snapshot reports a new warning: virtio/virtio_balloon.c: In function 'update_balloon_stats': virtio/virtio_balloon.c:258:26: error: 'events[2]' is used uninitialized in this function [-Werror=uninitialized] virtio/virtio_balloon.c:260:26: error: 'events[3]' is used uninitialized in this function [-Werror=uninitialized] virtio/virtio_balloon.c:261:56: error: 'events[18]' is used uninitialized in this function [-Werror=uninitialized] virtio/virtio_balloon.c:262:56: error: 'events[17]' is used uninitialized in this function [-Werror=uninitialized] This seems absolutely right, so we should add an extra check to prevent copying uninitialized stack data into the statistics. From all I can tell, this has been broken since the statistics code was originally added in 2.6.34. Fixes: 9564e138b1f6 ("virtio: Add memory statistics reporting to the balloon driver (V4)") Signed-off-by: Arnd Bergmann Signed-off-by: Ladi Prosek Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_balloon.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index d9544db231ef..34adf9b9c053 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -254,12 +254,14 @@ static unsigned int update_balloon_stats(struct virtio_balloon *vb) available = si_mem_available(); +#ifdef CONFIG_VM_EVENT_COUNTERS update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, pages_to_bytes(events[PSWPIN])); update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, pages_to_bytes(events[PSWPOUT])); update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); +#endif update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, pages_to_bytes(i.freeram)); update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, -- cgit v1.2.3
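For context on that last fix: when CONFIG_VM_EVENT_COUNTERS is disabled, all_vm_events() is effectively a no-op, so the events[] array on the stack is read without ever being written, which is exactly what gcc reported. A minimal userspace sketch of the same shape, with illustrative names rather than the kernel API (build with and without -DHAVE_EVENT_COUNTERS to see both configurations):

#include <stdio.h>

#define NR_EVENTS 4

/* stand-in for all_vm_events(): fills the array only when counters are compiled in */
static void all_events(unsigned long events[NR_EVENTS])
{
#ifdef HAVE_EVENT_COUNTERS
        for (int i = 0; i < NR_EVENTS; i++)
                events[i] = 100 + i;
#endif
}

int main(void)
{
        unsigned long events[NR_EVENTS];        /* stack garbage unless all_events() writes it */

        all_events(events);
#ifdef HAVE_EVENT_COUNTERS
        /* mirrors the fix: report event-derived stats only when they were actually filled in */
        printf("swap in:  %lu\n", events[0]);
        printf("swap out: %lu\n", events[1]);
#else
        printf("event counters not built in, skipping those stats\n");
#endif
        return 0;
}

Guarding the read, as the patch does with #ifdef CONFIG_VM_EVENT_COUNTERS, is what keeps uninitialized stack data out of the statistics sent to the host.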