Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_watchdog.c2
-rw-r--r--drivers/acpi/apei/ghes.c16
-rw-r--r--drivers/acpi/arm64/iort.c35
-rw-r--r--drivers/acpi/property.c53
-rw-r--r--drivers/android/binder.c112
-rw-r--r--drivers/android/binder_alloc.c42
-rw-r--r--drivers/android/binder_alloc.h1
-rw-r--r--drivers/ata/ahci.c5
-rw-r--r--drivers/ata/ata_piix.c1
-rw-r--r--drivers/ata/libata-core.c20
-rw-r--r--drivers/auxdisplay/charlcd.c11
-rw-r--r--drivers/auxdisplay/panel.c11
-rw-r--r--drivers/base/arch_topology.c12
-rw-r--r--drivers/base/dma-coherent.c19
-rw-r--r--drivers/base/node.c12
-rw-r--r--drivers/base/platform.c3
-rw-r--r--drivers/base/power/main.c9
-rw-r--r--drivers/base/power/opp/core.c7
-rw-r--r--drivers/base/power/qos.c10
-rw-r--r--drivers/base/property.c19
-rw-r--r--drivers/block/Kconfig2
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/loop.h6
-rw-r--r--drivers/block/nbd.c14
-rw-r--r--drivers/block/skd_main.c2
-rw-r--r--drivers/block/zram/zram_drv.c36
-rw-r--r--drivers/bus/mvebu-mbus.c2
-rw-r--r--drivers/char/tpm/tpm-interface.c10
-rw-r--r--drivers/char/tpm/tpm.h9
-rw-r--r--drivers/char/tpm/tpm2-cmd.c2
-rw-r--r--drivers/char/tpm/tpm_crb.c2
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c98
-rw-r--r--drivers/char/tpm/tpm_infineon.c6
-rw-r--r--drivers/char/tpm/tpm_tis_core.c8
-rw-r--r--drivers/clk/clk-bulk.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3128.c12
-rw-r--r--drivers/clk/samsung/clk-exynos4.c15
-rw-r--r--drivers/clocksource/cs5535-clockevt.c3
-rw-r--r--drivers/clocksource/numachip.c2
-rw-r--r--drivers/clocksource/timer-integrator-ap.c4
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c16
-rw-r--r--drivers/cpufreq/ti-cpufreq.c2
-rw-r--r--drivers/cpuidle/cpuidle-arm.c6
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c4
-rw-r--r--drivers/crypto/caam/Kconfig5
-rw-r--r--drivers/crypto/caam/ctrl.c19
-rw-r--r--drivers/crypto/caam/regs.h59
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c2
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c2
-rw-r--r--drivers/crypto/stm32/stm32-hash.c15
-rw-r--r--drivers/crypto/talitos.c9
-rw-r--r--drivers/dma-buf/sync_file.c17
-rw-r--r--drivers/dma/altera-msgdma.c41
-rw-r--r--drivers/dma/edma.c19
-rw-r--r--drivers/dma/ti-dma-crossbar.c3
-rw-r--r--drivers/fpga/altera-cvp.c6
-rw-r--r--drivers/gpio/Kconfig3
-rw-r--r--drivers/gpio/gpio-omap.c24
-rw-r--r--drivers/gpio/gpiolib-acpi.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c189
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c22
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c1
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c23
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c38
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h10
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c14
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c113
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c22
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c63
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c3
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c5
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c9
-rw-r--r--drivers/gpu/drm/i915/intel_color.c16
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c10
-rw-r--r--drivers/gpu/drm/i915/intel_display.c18
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.c20
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c11
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c9
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c17
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c4
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c9
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c5
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c15
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c24
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c3
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c41
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c19
-rw-r--r--drivers/gpu/drm/tegra/trace.h2
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c8
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c29
-rw-r--r--drivers/gpu/ipu-v3/ipu-prg.c7
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-elecom.c13
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-multitouch.c7
-rw-r--r--drivers/hid/hid-rmi.c13
-rw-r--r--drivers/hid/hidraw.c2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c3
-rw-r--r--drivers/hid/usbhid/hid-core.c12
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/wacom_sys.c7
-rw-r--r--drivers/hid/wacom_wac.c110
-rw-r--r--drivers/hv/channel.c6
-rw-r--r--drivers/hv/channel_mgmt.c46
-rw-r--r--drivers/hv/hv_fcopy.c4
-rw-r--r--drivers/hv/vmbus_drv.c3
-rw-r--r--drivers/hwmon/xgene-hwmon.c19
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/hwtracing/stm/core.c2
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c2
-rw-r--r--drivers/i2c/busses/i2c-imx.c4
-rw-r--r--drivers/i2c/busses/i2c-ismt.c5
-rw-r--r--drivers/i2c/busses/i2c-omap.c14
-rw-r--r--drivers/i2c/busses/i2c-piix4.c162
-rw-r--r--drivers/i2c/busses/i2c-sprd.c1
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c17
-rw-r--r--drivers/ide/ide-probe.c1
-rw-r--r--drivers/ide/ide-scan-pci.c13
-rw-r--r--drivers/ide/setup-pci.c63
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/adc/ad7793.c4
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c28
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c45
-rw-r--r--drivers/iio/adc/mcp320x.c25
-rw-r--r--drivers/iio/adc/stm32-adc.c2
-rw-r--r--drivers/iio/adc/ti-ads1015.c8
-rw-r--r--drivers/iio/adc/twl4030-madc.c14
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c11
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_events.c1
-rw-r--r--drivers/iio/industrialio-core.c4
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c4
-rw-r--r--drivers/iio/pressure/bmp280-core.c2
-rw-r--r--drivers/iio/pressure/zpa2326.c10
-rw-r--r--drivers/iio/proximity/as3935.c43
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c4
-rw-r--r--drivers/infiniband/core/iwpm_msg.c8
-rw-r--r--drivers/infiniband/core/iwpm_util.c5
-rw-r--r--drivers/infiniband/core/security.c4
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c14
-rw-r--r--drivers/infiniband/core/verbs.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h14
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c107
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c28
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c9
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c101
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h3
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.c20
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c41
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c50
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c154
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.h5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c39
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_p.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c11
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c6
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c14
-rw-r--r--drivers/infiniband/hw/mlx5/main.c20
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c47
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c27
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c3
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h2
-rw-r--r--drivers/infiniband/hw/qedr/qedr_cm.c12
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h31
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c16
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c30
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c2
-rw-r--r--drivers/input/ff-core.c13
-rw-r--r--drivers/input/input.c84
-rw-r--r--drivers/input/joydev.c70
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c29
-rw-r--r--drivers/input/misc/axp20x-pek.c2
-rw-r--r--drivers/input/misc/ims-pcu.c16
-rw-r--r--drivers/input/misc/uinput.c57
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c2
-rw-r--r--drivers/input/mouse/synaptics.c3
-rw-r--r--drivers/input/touchscreen/goodix.c67
-rw-r--r--drivers/input/touchscreen/stmfts.c6
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c2
-rw-r--r--drivers/iommu/Kconfig5
-rw-r--r--drivers/iommu/amd_iommu.c11
-rw-r--r--drivers/iommu/amd_iommu_init.c8
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/exynos-iommu.c2
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c2
-rw-r--r--drivers/iommu/mtk_iommu.c3
-rw-r--r--drivers/iommu/of_iommu.c5
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c43
-rw-r--r--drivers/irqchip/irq-gic-v3.c8
-rw-r--r--drivers/irqchip/irq-gic-v4.c12
-rw-r--r--drivers/irqchip/irq-mips-gic.c19
-rw-r--r--drivers/irqchip/irq-tango.c2
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c37
-rw-r--r--drivers/leds/leds-as3645a.c29
-rw-r--r--drivers/md/bcache/closure.c4
-rw-r--r--drivers/md/dm-core.h1
-rw-r--r--drivers/md/dm-crypt.c5
-rw-r--r--drivers/md/dm-ioctl.c37
-rw-r--r--drivers/md/dm-raid.c13
-rw-r--r--drivers/md/dm.c10
-rw-r--r--drivers/md/md.c72
-rw-r--r--drivers/md/md.h1
-rw-r--r--drivers/md/raid5.c20
-rw-r--r--drivers/media/cec/cec-adap.c13
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c25
-rw-r--r--drivers/media/dvb-frontends/dib3000mc.c50
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c22
-rw-r--r--drivers/media/platform/Kconfig2
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.c2
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c1
-rw-r--r--drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c3
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c11
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.h2
-rw-r--r--drivers/media/rc/ir-sharp-decoder.c2
-rw-r--r--drivers/media/tuners/mt2060.c59
-rw-r--r--drivers/misc/cxl/cxllib.c13
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/pci-me.c23
-rw-r--r--drivers/misc/mei/pci-txe.c30
-rw-r--r--drivers/mmc/core/block.c3
-rw-r--r--drivers/mmc/core/mmc.c36
-rw-r--r--drivers/mmc/core/queue.c120
-rw-r--r--drivers/mmc/core/queue.h6
-rw-r--r--drivers/mmc/host/Kconfig2
-rw-r--r--drivers/mmc/host/cavium-thunderx.c6
-rw-r--r--drivers/mmc/host/cavium.c2
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c26
-rw-r--r--drivers/mmc/host/pxamci.c6
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c17
-rw-r--r--drivers/mmc/host/sdhci-xenon.c24
-rw-r--r--drivers/mmc/host/sdhci-xenon.h1
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c47
-rw-r--r--drivers/mtd/mtdpart.c8
-rw-r--r--drivers/mtd/nand/atmel/pmecc.c2
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c3
-rw-r--r--drivers/mtd/nand/nand_base.c3
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c45
-rw-r--r--drivers/net/can/flexcan.c91
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/can/usb/gs_usb.c10
-rw-r--r--drivers/net/dsa/mv88e6060.c10
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c157
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c182
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c53
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c11
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c21
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c89
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c72
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h18
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c52
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c99
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c4
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fec.h4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c43
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h15
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c184
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c41
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c23
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c63
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c6
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h31
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c20
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c8
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-ethtool.c30
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c25
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c3
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.h3
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c5
-rw-r--r--drivers/net/ethernet/realtek/8139too.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_tlv.h48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c112
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c1
-rw-r--r--drivers/net/geneve.c6
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc.c7
-rw-r--r--drivers/net/hyperv/netvsc_drv.c8
-rw-r--r--drivers/net/macsec.c2
-rw-r--r--drivers/net/phy/Kconfig18
-rw-r--r--drivers/net/phy/phy.c3
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c22
-rw-r--r--drivers/net/tun.c11
-rw-r--r--drivers/net/usb/cdc_ether.c34
-rw-r--r--drivers/net/usb/lan78xx.c34
-rw-r--r--drivers/net/usb/r8152.c2
-rw-r--r--drivers/net/usb/rndis_host.c4
-rw-r--r--drivers/net/wimax/i2400m/fw.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c37
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c197
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/a000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c10
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c9
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c9
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c2
-rw-r--r--drivers/net/xen-netback/interface.c2
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c9
-rw-r--r--drivers/nvme/host/core.c11
-rw-r--r--drivers/nvme/host/fabrics.c18
-rw-r--r--drivers/nvme/host/fc.c21
-rw-r--r--drivers/nvme/host/pci.c48
-rw-r--r--drivers/nvme/host/rdma.c9
-rw-r--r--drivers/nvme/target/core.c9
-rw-r--r--drivers/nvme/target/fabrics-cmd.c9
-rw-r--r--drivers/nvme/target/fc.c24
-rw-r--r--drivers/nvme/target/fcloop.c104
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvmem/core.c3
-rw-r--r--drivers/of/base.c8
-rw-r--r--drivers/of/of_mdio.c39
-rw-r--r--drivers/of/of_reserved_mem.c2
-rw-r--r--drivers/of/property.c2
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c13
-rw-r--r--drivers/pci/host/pci-aardvark.c2
-rw-r--r--drivers/pci/host/pci-tegra.c22
-rw-r--r--drivers/pci/pci-sysfs.c11
-rw-r--r--drivers/perf/arm_pmu_acpi.c1
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c18
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c3
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c82
-rw-r--r--drivers/phy/tegra/xusb.c2
-rw-r--r--drivers/pinctrl/Kconfig1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c14
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c10
-rw-r--r--drivers/rapidio/devices/tsi721.c7
-rw-r--r--drivers/rapidio/rio-access.c40
-rw-r--r--drivers/ras/cec.c2
-rw-r--r--drivers/remoteproc/Kconfig2
-rw-r--r--drivers/remoteproc/imx_rproc.c9
-rw-r--r--drivers/reset/Kconfig9
-rw-r--r--drivers/reset/Makefile2
-rw-r--r--drivers/reset/reset-hsdk.c (renamed from drivers/reset/reset-hsdk-v1.c)44
-rw-r--r--drivers/reset/reset-socfpga.c17
-rw-r--r--drivers/rpmsg/qcom_glink_native.c14
-rw-r--r--drivers/s390/block/dasd.c12
-rw-r--r--drivers/s390/block/scm_blk.c6
-rw-r--r--drivers/s390/cio/device.c12
-rw-r--r--drivers/s390/cio/device.h1
-rw-r--r--drivers/s390/cio/device_fsm.c12
-rw-r--r--drivers/s390/cio/io_sch.h2
-rw-r--r--drivers/scsi/aacraid/aachba.c12
-rw-r--r--drivers/scsi/aacraid/aacraid.h5
-rw-r--r--drivers/scsi/aacraid/linit.c20
-rw-r--r--drivers/scsi/aacraid/src.c2
-rw-r--r--drivers/scsi/arm/acornscsi.c6
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/libiscsi.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c3
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/scsi/scsi_scan.c3
-rw-r--r--drivers/scsi/scsi_sysfs.c10
-rw-r--r--drivers/scsi/scsi_transport_fc.c19
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c18
-rw-r--r--drivers/scsi/sd.c37
-rw-r--r--drivers/scsi/sg.c64
-rw-r--r--drivers/spi/spi-armada-3700.c145
-rw-r--r--drivers/spi/spi-bcm-qspi.c9
-rw-r--r--drivers/spi/spi-stm32.c4
-rw-r--r--drivers/staging/iio/adc/ad7192.c4
-rw-r--r--drivers/staging/iio/meter/ade7759.c2
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c4
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c8
-rw-r--r--drivers/staging/pi433/rf69.c9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c3
-rw-r--r--drivers/staging/rtl8723bs/os_dep/rtw_proc.c2
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c4
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c13
-rw-r--r--drivers/staging/speakup/main.c15
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c4
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c19
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c6
-rw-r--r--drivers/tty/mxser.c16
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c5
-rw-r--r--drivers/tty/serial/fsl_lpuart.c40
-rw-r--r--drivers/tty/serial/sccnxp.c13
-rw-r--r--drivers/tty/tty_ldisc.c11
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/class/cdc-wdm.c4
-rw-r--r--drivers/usb/core/config.c22
-rw-r--r--drivers/usb/core/devio.c5
-rw-r--r--drivers/usb/core/hub.c13
-rw-r--r--drivers/usb/core/message.c4
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c1
-rw-r--r--drivers/usb/dwc3/ep0.c7
-rw-r--r--drivers/usb/gadget/composite.c5
-rw-r--r--drivers/usb/gadget/configfs.c15
-rw-r--r--drivers/usb/gadget/configfs.h11
-rw-r--r--drivers/usb/gadget/function/f_fs.c17
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c27
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.h14
-rw-r--r--drivers/usb/gadget/function/f_printer.c7
-rw-r--r--drivers/usb/gadget/function/f_rndis.c12
-rw-r--r--drivers/usb/gadget/function/u_fs.h1
-rw-r--r--drivers/usb/gadget/function/u_rndis.h1
-rw-r--r--drivers/usb/gadget/legacy/inode.c46
-rw-r--r--drivers/usb/gadget/legacy/mass_storage.c26
-rw-r--r--drivers/usb/gadget/udc/Kconfig1
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c4
-rw-r--r--drivers/usb/gadget/udc/core.c3
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c74
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c19
-rw-r--r--drivers/usb/host/pci-quirks.c10
-rw-r--r--drivers/usb/host/xhci-hub.c37
-rw-r--r--drivers/usb/host/xhci-pci.c12
-rw-r--r--drivers/usb/host/xhci-plat.c16
-rw-r--r--drivers/usb/host/xhci-ring.c21
-rw-r--r--drivers/usb/host/xhci.c6
-rw-r--r--drivers/usb/host/xhci.h10
-rw-r--r--drivers/usb/misc/usbtest.c10
-rw-r--r--drivers/usb/musb/musb_core.c21
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_cppi41.c94
-rw-r--r--drivers/usb/musb/sunxi.c2
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c17
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c23
-rw-r--r--drivers/usb/serial/console.c3
-rw-r--r--drivers/usb/serial/cp210x.c13
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/metro-usb.c1
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/qcserial.c4
-rw-r--r--drivers/usb/storage/transport.c14
-rw-r--r--drivers/usb/storage/uas-detect.h15
-rw-r--r--drivers/usb/storage/uas.c10
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/uwb/hwa-rc.c2
-rw-r--r--drivers/uwb/uwbd.c12
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c11
-rw-r--r--drivers/xen/xenbus/xenbus_client.c130
547 files changed, 6198 insertions, 3450 deletions
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index bf22c29d2517..11b113f8e367 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -66,7 +66,7 @@ void __init acpi_watchdog_init(void)
for (i = 0; i < wdat->entries; i++) {
const struct acpi_generic_address *gas;
struct resource_entry *rentry;
- struct resource res;
+ struct resource res = {};
bool found;
gas = &entries[i].register_region;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 077f9bad6f44..3c3a37b8503b 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -743,17 +743,19 @@ static int ghes_proc(struct ghes *ghes)
}
ghes_do_proc(ghes, ghes->estatus);
+out:
+ ghes_clear_estatus(ghes);
+
+ if (rc == -ENOENT)
+ return rc;
+
/*
* GHESv2 type HEST entries introduce support for error acknowledgment,
* so only acknowledge the error if this support is present.
*/
- if (is_hest_type_generic_v2(ghes)) {
- rc = ghes_ack_error(ghes->generic_v2);
- if (rc)
- return rc;
- }
-out:
- ghes_clear_estatus(ghes);
+ if (is_hest_type_generic_v2(ghes))
+ return ghes_ack_error(ghes->generic_v2);
+
return rc;
}
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 9565d572f8dd..de56394dd161 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1178,12 +1178,44 @@ dev_put:
return ret;
}
+static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
+{
+ if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
+ struct acpi_iort_node *parent;
+ struct acpi_iort_id_mapping *map;
+ int i;
+
+ map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
+ iort_node->mapping_offset);
+
+ for (i = 0; i < iort_node->mapping_count; i++, map++) {
+ if (!map->output_reference)
+ continue;
+
+ parent = ACPI_ADD_PTR(struct acpi_iort_node,
+ iort_table, map->output_reference);
+ /*
+ * If we detect a RC->SMMU mapping, make sure
+ * we enable ACS on the system.
+ */
+ if ((parent->type == ACPI_IORT_NODE_SMMU) ||
+ (parent->type == ACPI_IORT_NODE_SMMU_V3)) {
+ pci_request_acs();
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
static void __init iort_init_platform_devices(void)
{
struct acpi_iort_node *iort_node, *iort_end;
struct acpi_table_iort *iort;
struct fwnode_handle *fwnode;
int i, ret;
+ bool acs_enabled = false;
/*
* iort_table and iort both point to the start of IORT table, but
@@ -1203,6 +1235,9 @@ static void __init iort_init_platform_devices(void)
return;
}
+ if (!acs_enabled)
+ acs_enabled = iort_enable_acs(iort_node);
+
if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
(iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index c1c216163de3..e26ea209b63e 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
* }
* }
*
- * Calling this function with index %2 return %-ENOENT and with index %3
- * returns the last entry. If the property does not contain any more values
- * %-ENODATA is returned. The NULL entry must be single integer and
- * preferably contain value %0.
+ * Calling this function with index %2 or index %3 returns %-ENOENT. If the
+ * property does not contain any more values, %-ENOENT is returned. The NULL
+ * entry must be a single integer and preferably contain value %0.
*
* Return: %0 on success, negative error code on failure.
*/
@@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
data = acpi_device_data_of_node(fwnode);
if (!data)
- return -EINVAL;
+ return -ENOENT;
ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
if (ret)
- return ret;
+ return ret == -EINVAL ? -ENOENT : -EINVAL;
/*
* The simplest case is when the value is a single reference. Just
@@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
ret = acpi_bus_get_device(obj->reference.handle, &device);
if (ret)
- return ret;
+ return ret == -ENODEV ? -EINVAL : ret;
args->adev = device;
args->nargs = 0;
@@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
* The index argument is then used to determine which reference
* the caller wants (along with the arguments).
*/
- if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count)
- return -EPROTO;
+ if (obj->type != ACPI_TYPE_PACKAGE)
+ return -EINVAL;
+ if (index >= obj->package.count)
+ return -ENOENT;
element = obj->package.elements;
end = element + obj->package.count;
@@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
ret = acpi_bus_get_device(element->reference.handle,
&device);
if (ret)
- return -ENODEV;
+ return -EINVAL;
nargs = 0;
element++;
@@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
else if (type == ACPI_TYPE_LOCAL_REFERENCE)
break;
else
- return -EPROTO;
+ return -EINVAL;
}
if (nargs > MAX_ACPI_REFERENCE_ARGS)
- return -EPROTO;
+ return -EINVAL;
if (idx == index) {
args->adev = device;
@@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -ENOENT;
element++;
} else {
- return -EPROTO;
+ return -EINVAL;
}
idx++;
}
- return -ENODATA;
+ return -ENOENT;
}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
@@ -908,11 +909,12 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
const struct acpi_device *adev = to_acpi_device_node(fwnode);
- struct acpi_device *child_adev = NULL;
const struct list_head *head;
struct list_head *next;
if (!child || is_acpi_device_node(child)) {
+ struct acpi_device *child_adev;
+
if (adev)
head = &adev->children;
else
@@ -922,8 +924,8 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
goto nondev;
if (child) {
- child_adev = to_acpi_device_node(child);
- next = child_adev->node.next;
+ adev = to_acpi_device_node(child);
+ next = adev->node.next;
if (next == head) {
child = NULL;
goto nondev;
@@ -941,8 +943,8 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
const struct acpi_data_node *data = to_acpi_data_node(fwnode);
struct acpi_data_node *dn;
- if (child_adev)
- head = &child_adev->data.subnodes;
+ if (adev)
+ head = &adev->data.subnodes;
else if (data)
head = &data->data.subnodes;
else
@@ -1293,3 +1295,16 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
DECLARE_ACPI_FWNODE_OPS(acpi_device_fwnode_ops);
DECLARE_ACPI_FWNODE_OPS(acpi_data_fwnode_ops);
const struct fwnode_operations acpi_static_fwnode_ops;
+
+bool is_acpi_device_node(const struct fwnode_handle *fwnode)
+{
+ return !IS_ERR_OR_NULL(fwnode) &&
+ fwnode->ops == &acpi_device_fwnode_ops;
+}
+EXPORT_SYMBOL(is_acpi_device_node);
+
+bool is_acpi_data_node(const struct fwnode_handle *fwnode)
+{
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &acpi_data_fwnode_ops;
+}
+EXPORT_SYMBOL(is_acpi_data_node);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index d055b3f2a207..fddf76ef5bd6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2217,7 +2217,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
debug_id, (u64)fda->num_fds);
continue;
}
- fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+ fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
task_close_fd(proc, fd_array[fd_index]);
} break;
@@ -2326,7 +2326,6 @@ static int binder_translate_handle(struct flat_binder_object *fp,
(u64)node->ptr);
binder_node_unlock(node);
} else {
- int ret;
struct binder_ref_data dest_rdata;
binder_node_unlock(node);
@@ -2442,7 +2441,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
*/
parent_buffer = parent->buffer -
binder_alloc_get_user_buffer_offset(&target_proc->alloc);
- fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+ fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
proc->pid, thread->pid);
@@ -2508,7 +2507,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
proc->pid, thread->pid);
return -EINVAL;
}
- parent_buffer = (u8 *)(parent->buffer -
+ parent_buffer = (u8 *)((uintptr_t)parent->buffer -
binder_alloc_get_user_buffer_offset(
&target_proc->alloc));
*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
@@ -2583,6 +2582,48 @@ static bool binder_proc_transaction(struct binder_transaction *t,
return true;
}
+/**
+ * binder_get_node_refs_for_txn() - Get required refs on node for txn
+ * @node: struct binder_node for which to get refs
+ * @proc: returns @node->proc if valid
+ * @error: if no @proc then returns BR_DEAD_REPLY
+ *
+ * User-space normally keeps the node alive when creating a transaction
+ * since it has a reference to the target. The local strong ref keeps it
+ * alive if the sending process dies before the target process processes
+ * the transaction. If the source process is malicious or has a reference
+ * counting bug, relying on the local strong ref can fail.
+ *
+ * Since user-space can cause the local strong ref to go away, we also take
+ * a tmpref on the node to ensure it survives while we are constructing
+ * the transaction. We also need a tmpref on the proc while we are
+ * constructing the transaction, so we take that here as well.
+ *
+ * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
+ * Also sets @proc if valid. If @node->proc is NULL, indicating that the
+ * target proc has died, @error is set to BR_DEAD_REPLY.
+ */
+static struct binder_node *binder_get_node_refs_for_txn(
+ struct binder_node *node,
+ struct binder_proc **procp,
+ uint32_t *error)
+{
+ struct binder_node *target_node = NULL;
+
+ binder_node_inner_lock(node);
+ if (node->proc) {
+ target_node = node;
+ binder_inc_node_nilocked(node, 1, 0, NULL);
+ binder_inc_node_tmpref_ilocked(node);
+ node->proc->tmp_ref++;
+ *procp = node->proc;
+ } else
+ *error = BR_DEAD_REPLY;
+ binder_node_inner_unlock(node);
+
+ return target_node;
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -2686,43 +2727,35 @@ static void binder_transaction(struct binder_proc *proc,
ref = binder_get_ref_olocked(proc, tr->target.handle,
true);
if (ref) {
- binder_inc_node(ref->node, 1, 0, NULL);
- target_node = ref->node;
- }
- binder_proc_unlock(proc);
- if (target_node == NULL) {
+ target_node = binder_get_node_refs_for_txn(
+ ref->node, &target_proc,
+ &return_error);
+ } else {
binder_user_error("%d:%d got transaction to invalid handle\n",
- proc->pid, thread->pid);
+ proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
- return_error_param = -EINVAL;
- return_error_line = __LINE__;
- goto err_invalid_target_handle;
}
+ binder_proc_unlock(proc);
} else {
mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
- if (target_node == NULL) {
+ if (target_node)
+ target_node = binder_get_node_refs_for_txn(
+ target_node, &target_proc,
+ &return_error);
+ else
return_error = BR_DEAD_REPLY;
- mutex_unlock(&context->context_mgr_node_lock);
- return_error_line = __LINE__;
- goto err_no_context_mgr_node;
- }
- binder_inc_node(target_node, 1, 0, NULL);
mutex_unlock(&context->context_mgr_node_lock);
}
- e->to_node = target_node->debug_id;
- binder_node_lock(target_node);
- target_proc = target_node->proc;
- if (target_proc == NULL) {
- binder_node_unlock(target_node);
- return_error = BR_DEAD_REPLY;
+ if (!target_node) {
+ /*
+ * return_error is set above
+ */
+ return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_dead_binder;
}
- binder_inner_proc_lock(target_proc);
- target_proc->tmp_ref++;
- binder_inner_proc_unlock(target_proc);
- binder_node_unlock(target_node);
+ e->to_node = target_node->debug_id;
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
@@ -3072,6 +3105,8 @@ static void binder_transaction(struct binder_proc *proc,
if (target_thread)
binder_thread_dec_tmpref(target_thread);
binder_proc_dec_tmpref(target_proc);
+ if (target_node)
+ binder_dec_node_tmpref(target_node);
/*
* write barrier to synchronize with initialization
* of log entry
@@ -3083,6 +3118,7 @@ static void binder_transaction(struct binder_proc *proc,
err_dead_proc_or_thread:
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
+ binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
@@ -3090,6 +3126,8 @@ err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ if (target_node)
+ binder_dec_node_tmpref(target_node);
target_node = NULL;
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
@@ -3104,13 +3142,14 @@ err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
-err_no_context_mgr_node:
if (target_thread)
binder_thread_dec_tmpref(target_thread);
if (target_proc)
binder_proc_dec_tmpref(target_proc);
- if (target_node)
+ if (target_node) {
binder_dec_node(target_node, 1, 0);
+ binder_dec_node_tmpref(target_node);
+ }
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
@@ -3623,12 +3662,6 @@ static void binder_stat_br(struct binder_proc *proc,
}
}
-static int binder_has_thread_work(struct binder_thread *thread)
-{
- return !binder_worklist_empty(thread->proc, &thread->todo) ||
- thread->looper_need_return;
-}
-
static int binder_put_node_cmd(struct binder_proc *proc,
struct binder_thread *thread,
void __user **ptrp,
@@ -4258,12 +4291,9 @@ static unsigned int binder_poll(struct file *filp,
binder_inner_proc_unlock(thread->proc);
- if (binder_has_work(thread, wait_for_proc_work))
- return POLLIN;
-
poll_wait(filp, &thread->wait, wait);
- if (binder_has_thread_work(thread))
+ if (binder_has_work(thread, wait_for_proc_work))
return POLLIN;
return 0;
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 8fe165844e47..c2819a3d58a6 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -215,17 +215,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
}
- if (!vma && need_mm)
- mm = get_task_mm(alloc->tsk);
+ if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm))
+ mm = alloc->vma_vm_mm;
if (mm) {
down_write(&mm->mmap_sem);
vma = alloc->vma;
- if (vma && mm != alloc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- alloc->pid);
- vma = NULL;
- }
}
if (!vma && need_mm) {
@@ -565,7 +560,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK do not share page with %pK or %pK\n",
alloc->pid, buffer->data,
- prev->data, next->data);
+ prev->data, next ? next->data : NULL);
binder_update_page_range(alloc, 0, buffer_start_page(buffer),
buffer_start_page(buffer) + PAGE_SIZE,
NULL);
@@ -720,6 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
barrier();
alloc->vma = vma;
alloc->vma_vm_mm = vma->vm_mm;
+ mmgrab(alloc->vma_vm_mm);
return 0;
@@ -795,6 +791,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
vfree(alloc->buffer);
}
mutex_unlock(&alloc->mutex);
+ if (alloc->vma_vm_mm)
+ mmdrop(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d buffers %d, pages %d\n",
@@ -889,7 +887,6 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
WRITE_ONCE(alloc->vma, NULL);
- WRITE_ONCE(alloc->vma_vm_mm, NULL);
}
/**
@@ -913,6 +910,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
struct binder_alloc *alloc;
uintptr_t page_addr;
size_t index;
+ struct vm_area_struct *vma;
alloc = page->alloc;
if (!mutex_trylock(&alloc->mutex))
@@ -923,16 +921,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
- if (alloc->vma) {
- mm = get_task_mm(alloc->tsk);
- if (!mm)
- goto err_get_task_mm_failed;
+ vma = alloc->vma;
+ if (vma) {
+ if (!mmget_not_zero(alloc->vma_vm_mm))
+ goto err_mmget;
+ mm = alloc->vma_vm_mm;
if (!down_write_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
+ }
+
+ list_lru_isolate(lru, item);
+ spin_unlock(lock);
+ if (vma) {
trace_binder_unmap_user_start(alloc, index);
- zap_page_range(alloc->vma,
+ zap_page_range(vma,
page_addr + alloc->user_buffer_offset,
PAGE_SIZE);
@@ -950,14 +954,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_kernel_end(alloc, index);
- list_lru_isolate(lru, item);
-
+ spin_lock(lock);
mutex_unlock(&alloc->mutex);
- return LRU_REMOVED;
+ return LRU_REMOVED_RETRY;
err_down_write_mmap_sem_failed:
- mmput(mm);
-err_get_task_mm_failed:
+ mmput_async(mm);
+err_mmget:
err_page_already_freed:
mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
@@ -996,7 +999,6 @@ struct shrinker binder_shrinker = {
*/
void binder_alloc_init(struct binder_alloc *alloc)
{
- alloc->tsk = current->group_leader;
alloc->pid = current->group_leader->pid;
mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index a3a3602c689c..2dd33b6df104 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -100,7 +100,6 @@ struct binder_lru_page {
*/
struct binder_alloc {
struct mutex mutex;
- struct task_struct *tsk;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
void *buffer;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index cb9b0e9090e3..9f78bb03bb76 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -621,8 +621,11 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
static int ahci_pci_reset_controller(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
+ int rc;
- ahci_reset_controller(host);
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
struct ahci_host_priv *hpriv = host->private_data;
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 8401c3b5be92..b702c20fbc2b 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -492,6 +492,7 @@ static const struct ich_laptop ich_laptop[] = {
{ 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */
{ 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */
{ 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
+ { 0x24CA, 0x10CF, 0x11AB }, /* ICH4M on Fujitsu-Siemens Lifebook S6120 */
{ 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
{ 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
{ 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1945a8ea2099..ee4c1ec9dca0 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3234,19 +3234,19 @@ static const struct ata_timing ata_timing[] = {
};
#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
-#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
+#define EZ(v, unit) ((v)?ENOUGH(((v) * 1000), unit):0)
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
- q->setup = EZ(t->setup * 1000, T);
- q->act8b = EZ(t->act8b * 1000, T);
- q->rec8b = EZ(t->rec8b * 1000, T);
- q->cyc8b = EZ(t->cyc8b * 1000, T);
- q->active = EZ(t->active * 1000, T);
- q->recover = EZ(t->recover * 1000, T);
- q->dmack_hold = EZ(t->dmack_hold * 1000, T);
- q->cycle = EZ(t->cycle * 1000, T);
- q->udma = EZ(t->udma * 1000, UT);
+ q->setup = EZ(t->setup, T);
+ q->act8b = EZ(t->act8b, T);
+ q->rec8b = EZ(t->rec8b, T);
+ q->cyc8b = EZ(t->cyc8b, T);
+ q->active = EZ(t->active, T);
+ q->recover = EZ(t->recover, T);
+ q->dmack_hold = EZ(t->dmack_hold, T);
+ q->cycle = EZ(t->cycle, T);
+ q->udma = EZ(t->udma, UT);
}
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index cfeb049a01ef..642afd88870b 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -647,18 +647,25 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
static int charlcd_open(struct inode *inode, struct file *file)
{
struct charlcd_priv *priv = to_priv(the_charlcd);
+ int ret;
+ ret = -EBUSY;
if (!atomic_dec_and_test(&charlcd_available))
- return -EBUSY; /* open only once at a time */
+ goto fail; /* open only once at a time */
+ ret = -EPERM;
if (file->f_mode & FMODE_READ) /* device is write-only */
- return -EPERM;
+ goto fail;
if (priv->must_clear) {
charlcd_clear_display(&priv->lcd);
priv->must_clear = false;
}
return nonseekable_open(inode, file);
+
+ fail:
+ atomic_inc(&charlcd_available);
+ return ret;
}
static int charlcd_release(struct inode *inode, struct file *file)
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index df126dcdaf18..6911acd896d9 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1105,14 +1105,21 @@ static ssize_t keypad_read(struct file *file,
static int keypad_open(struct inode *inode, struct file *file)
{
+ int ret;
+
+ ret = -EBUSY;
if (!atomic_dec_and_test(&keypad_available))
- return -EBUSY; /* open only once at a time */
+ goto fail; /* open only once at a time */
+ ret = -EPERM;
if (file->f_mode & FMODE_WRITE) /* device is read-only */
- return -EPERM;
+ goto fail;
keypad_buflen = 0; /* flush the buffer on opening */
return 0;
+ fail:
+ atomic_inc(&keypad_available);
+ return ret;
}
static int keypad_release(struct inode *inode, struct file *file)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 41be9ff7d70a..6df7d6676a48 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -166,11 +166,11 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
}
#ifdef CONFIG_CPU_FREQ
-static cpumask_var_t cpus_to_visit;
-static void parsing_done_workfn(struct work_struct *work);
-static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+static cpumask_var_t cpus_to_visit __initdata;
+static void __init parsing_done_workfn(struct work_struct *work);
+static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn);
-static int
+static int __init
init_cpu_capacity_callback(struct notifier_block *nb,
unsigned long val,
void *data)
@@ -206,7 +206,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
return 0;
}
-static struct notifier_block init_cpu_capacity_notifier = {
+static struct notifier_block init_cpu_capacity_notifier __initdata = {
.notifier_call = init_cpu_capacity_callback,
};
@@ -232,7 +232,7 @@ static int __init register_cpufreq_notifier(void)
}
core_initcall(register_cpufreq_notifier);
-static void parsing_done_workfn(struct work_struct *work)
+static void __init parsing_done_workfn(struct work_struct *work)
{
cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index a39b2166b145..744f64f43454 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -348,16 +348,15 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
struct dma_coherent_mem *mem = rmem->priv;
int ret;
- if (!mem)
- return -ENODEV;
-
- ret = dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
- DMA_MEMORY_EXCLUSIVE, &mem);
-
- if (ret) {
- pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
- &rmem->base, (unsigned long)rmem->size / SZ_1M);
- return ret;
+ if (!mem) {
+ ret = dma_init_coherent_memory(rmem->base, rmem->base,
+ rmem->size,
+ DMA_MEMORY_EXCLUSIVE, &mem);
+ if (ret) {
+ pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
+ &rmem->base, (unsigned long)rmem->size / SZ_1M);
+ return ret;
+ }
}
mem->use_dev_dma_pfn_offset = true;
rmem->priv = mem;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 3855902f2c5b..aae2402f3791 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -27,13 +27,21 @@ static struct bus_type node_subsys = {
static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
{
+ ssize_t n;
+ cpumask_var_t mask;
struct node *node_dev = to_node(dev);
- const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
- return cpumap_print_to_pagebuf(list, buf, mask);
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return 0;
+
+ cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
+ n = cpumap_print_to_pagebuf(list, buf, mask);
+ free_cpumask_var(mask);
+
+ return n;
}
static inline ssize_t node_read_cpumask(struct device *dev,
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index d1bd99271066..9045c5f3734e 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -868,7 +868,8 @@ static ssize_t driver_override_store(struct device *dev,
struct platform_device *pdev = to_platform_device(dev);
char *driver_override, *old, *cp;
- if (count > PATH_MAX)
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, count, GFP_KERNEL);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index ea1732ed7a9d..770b1539a083 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1860,10 +1860,13 @@ void device_pm_check_callbacks(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
dev->power.no_pm_callbacks =
- (!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
- (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
+ (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
+ !dev->bus->suspend && !dev->bus->resume)) &&
+ (!dev->class || (pm_ops_is_empty(dev->class->pm) &&
+ !dev->class->suspend && !dev->class->resume)) &&
(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
- (!dev->driver || pm_ops_is_empty(dev->driver->pm));
+ (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
+ !dev->driver->suspend && !dev->driver->resume));
spin_unlock_irq(&dev->power.lock);
}
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index a8cc14fd8ae4..a6de32530693 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
opp->available = availability_req;
+ dev_pm_opp_get(opp);
+ mutex_unlock(&opp_table->lock);
+
/* Notify the change of the OPP availability */
if (availability_req)
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
@@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
blocking_notifier_call_chain(&opp_table->head,
OPP_EVENT_DISABLE, opp);
+ dev_pm_opp_put(opp);
+ goto put_table;
+
unlock:
mutex_unlock(&opp_table->lock);
+put_table:
dev_pm_opp_put_opp_table(opp_table);
return r;
}
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index f850daeffba4..277d43a83f53 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -277,11 +277,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
-static bool dev_pm_qos_invalid_request(struct device *dev,
- struct dev_pm_qos_request *req)
+static bool dev_pm_qos_invalid_req_type(struct device *dev,
+ enum dev_pm_qos_req_type type)
{
- return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
- && !dev->power.set_latency_tolerance);
+ return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
+ !dev->power.set_latency_tolerance;
}
static int __dev_pm_qos_add_request(struct device *dev,
@@ -290,7 +290,7 @@ static int __dev_pm_qos_add_request(struct device *dev,
{
int ret = 0;
- if (!dev || dev_pm_qos_invalid_request(dev, req))
+ if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
return -EINVAL;
if (WARN(dev_pm_qos_request_active(req),
diff --git a/drivers/base/property.c b/drivers/base/property.c
index d0b65bbe7e15..7ed99c1b2a8b 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -21,6 +21,7 @@
#include <linux/phy.h>
struct property_set {
+ struct device *dev;
struct fwnode_handle fwnode;
const struct property_entry *properties;
};
@@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
* Caller is responsible to call fwnode_handle_put() on the returned
* args->fwnode pointer.
*
+ * Returns: %0 on success
+ * %-ENOENT when the index is out of bounds, the index has an empty
+ * reference or the property was not found
+ * %-EINVAL on parse error
*/
int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
@@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
void device_remove_properties(struct device *dev)
{
struct fwnode_handle *fwnode;
+ struct property_set *pset;
fwnode = dev_fwnode(dev);
if (!fwnode)
@@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev)
* the pset. If there is no real firmware node (ACPI/DT) primary
* will hold the pset.
*/
- if (is_pset_node(fwnode)) {
+ pset = to_pset_node(fwnode);
+ if (pset) {
set_primary_fwnode(dev, NULL);
- pset_free_set(to_pset_node(fwnode));
} else {
- fwnode = fwnode->secondary;
- if (!IS_ERR(fwnode) && is_pset_node(fwnode)) {
+ pset = to_pset_node(fwnode->secondary);
+ if (pset && dev == pset->dev)
set_secondary_fwnode(dev, NULL);
- pset_free_set(to_pset_node(fwnode));
- }
}
+ if (pset && dev == pset->dev)
+ pset_free_set(pset);
}
EXPORT_SYMBOL_GPL(device_remove_properties);
@@ -938,6 +944,7 @@ int device_add_properties(struct device *dev,
p->fwnode.ops = &pset_fwnode_ops;
set_secondary_fwnode(dev, &p->fwnode);
+ p->dev = dev;
return 0;
}
EXPORT_SYMBOL_GPL(device_add_properties);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 4a438b8abe27..2dfe99b328f8 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,7 +17,7 @@ if BLK_DEV
config BLK_DEV_NULL_BLK
tristate "Null test block driver"
- depends on CONFIGFS_FS
+ select CONFIGFS_FS
config BLK_DEV_FD
tristate "Normal floppy disk support"
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index bbd0d186cfc0..2d7178f7754e 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -342,7 +342,7 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
if (!brd)
return -ENODEV;
- page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512);
+ page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT);
if (!page)
return -ENOSPC;
*kaddr = page_address(page);
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index f68c1d50802f..1f3956702993 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -67,10 +67,8 @@ struct loop_device {
struct loop_cmd {
struct kthread_work work;
struct request *rq;
- union {
- bool use_aio; /* use AIO interface to handle I/O */
- atomic_t ref; /* only for aio */
- };
+ bool use_aio; /* use AIO interface to handle I/O */
+ atomic_t ref; /* only for aio */
long ret;
struct kiocb iocb;
struct bio_vec *bvec;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2aa87cbdede0..baebbdfd74d5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
struct nbd_config *config = nbd->config;
config->blksize = blocksize;
config->bytesize = blocksize * nr_blocks;
- nbd_size_update(nbd);
}
static void nbd_complete_rq(struct request *req)
@@ -820,9 +819,13 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* appropriate.
*/
ret = nbd_handle_cmd(cmd, hctx->queue_num);
+ if (ret < 0)
+ ret = BLK_STS_IOERR;
+ else if (!ret)
+ ret = BLK_STS_OK;
complete(&cmd->send_complete);
- return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
+ return ret;
}
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
@@ -1090,6 +1093,7 @@ static int nbd_start_device(struct nbd_device *nbd)
args->index = i;
queue_work(recv_workqueue, &args->work);
}
+ nbd_size_update(nbd);
return error;
}
@@ -1194,6 +1198,12 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ /* The block layer will pass back some non-nbd ioctls in case we have
+ * special handling for them, but we don't, so just return an error.
+ */
+ if (_IOC_TYPE(cmd) != 0xab)
+ return -EINVAL;
+
mutex_lock(&nbd->config_lock);
/* Don't allow ioctl operations on a nbd device that was created with
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 7cedb4295e9d..64d0fc17c174 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
return NULL;
*dma_handle = dma_map_single(dev, buf, s->size, dir);
if (dma_mapping_error(dev, *dma_handle)) {
- kfree(buf);
+ kmem_cache_free(s, buf);
buf = NULL;
}
return buf;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 2981c27d3aae..f149d3e61234 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -766,27 +766,6 @@ static void zram_slot_unlock(struct zram *zram, u32 index)
bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value);
}
-static bool zram_same_page_read(struct zram *zram, u32 index,
- struct page *page,
- unsigned int offset, unsigned int len)
-{
- zram_slot_lock(zram, index);
- if (unlikely(!zram_get_handle(zram, index) ||
- zram_test_flag(zram, index, ZRAM_SAME))) {
- void *mem;
-
- zram_slot_unlock(zram, index);
- mem = kmap_atomic(page);
- zram_fill_page(mem + offset, len,
- zram_get_element(zram, index));
- kunmap_atomic(mem);
- return true;
- }
- zram_slot_unlock(zram, index);
-
- return false;
-}
-
static void zram_meta_free(struct zram *zram, u64 disksize)
{
size_t num_pages = disksize >> PAGE_SHIFT;
@@ -884,11 +863,20 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
zram_slot_unlock(zram, index);
}
- if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
- return 0;
-
zram_slot_lock(zram, index);
handle = zram_get_handle(zram, index);
+ if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
+ unsigned long value;
+ void *mem;
+
+ value = handle ? zram_get_element(zram, index) : 0;
+ mem = kmap_atomic(page);
+ zram_fill_page(mem, PAGE_SIZE, value);
+ kunmap_atomic(mem);
+ zram_slot_unlock(zram, index);
+ return 0;
+ }
+
size = zram_get_obj_size(zram, index);
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c7f396903184..70db4d5638a6 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
if (mbus->hw_io_coherency)
w->mbus_attr |= ATTR_HW_COHERENCY;
w->base = base & DDR_BASE_CS_LOW_MASK;
- w->size = (size | ~DDR_SIZE_MASK) + 1;
+ w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
}
}
mvebu_mbus_dram_info.num_cs = cs;
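The one-line cast above guards against 32-bit overflow when a DDR window spans the full 4 GB. A minimal standalone sketch of the arithmetic, with an illustrative DDR_SIZE_MASK value rather than the driver's actual definition:

#include <stdint.h>
#include <stdio.h>

#define DDR_SIZE_MASK 0xffff0000u   /* illustrative, not the mvebu definition */

int main(void)
{
	uint32_t size = 0xffff0000u;                              /* a full 4 GB window */
	uint32_t wrapped = (size | ~DDR_SIZE_MASK) + 1;           /* 0xffffffff + 1 wraps to 0 */
	uint64_t fixed = (uint64_t)(size | ~DDR_SIZE_MASK) + 1;   /* widened first: 4 GB */

	printf("wrapped=%u fixed=%llu\n", wrapped, (unsigned long long)fixed);
	return 0;
}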
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index fe597e6c55c4..1d6729be4cd6 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -455,7 +455,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
goto out;
}
- msleep(TPM_TIMEOUT); /* CHECK */
+ tpm_msleep(TPM_TIMEOUT);
rmb();
} while (time_before(jiffies, stop));
@@ -970,7 +970,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
dev_info(
&chip->dev, HW_ERR
"TPM command timed out during continue self test");
- msleep(delay_msec);
+ tpm_msleep(delay_msec);
continue;
}
@@ -985,7 +985,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
}
if (rc != TPM_WARN_DOING_SELFTEST)
return rc;
- msleep(delay_msec);
+ tpm_msleep(delay_msec);
} while (--loops > 0);
return rc;
@@ -1085,7 +1085,7 @@ again:
}
} else {
do {
- msleep(TPM_TIMEOUT);
+ tpm_msleep(TPM_TIMEOUT);
status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
@@ -1150,7 +1150,7 @@ int tpm_pm_suspend(struct device *dev)
*/
if (rc != TPM_WARN_RETRY)
break;
- msleep(TPM_TIMEOUT_RETRY);
+ tpm_msleep(TPM_TIMEOUT_RETRY);
}
if (rc)
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 04fbff2edbf3..2d5466a72e40 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -50,7 +50,8 @@ enum tpm_const {
enum tpm_timeout {
TPM_TIMEOUT = 5, /* msecs */
- TPM_TIMEOUT_RETRY = 100 /* msecs */
+ TPM_TIMEOUT_RETRY = 100, /* msecs */
+ TPM_TIMEOUT_RANGE_US = 300 /* usecs */
};
/* TPM addresses */
@@ -527,6 +528,12 @@ int tpm_pm_resume(struct device *dev);
int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
wait_queue_head_t *queue, bool check_cancel);
+static inline void tpm_msleep(unsigned int delay_msec)
+{
+ usleep_range(delay_msec * 1000,
+ (delay_msec * 1000) + TPM_TIMEOUT_RANGE_US);
+}
+
struct tpm_chip *tpm_chip_find_get(int chip_num);
__must_check int tpm_try_get_ops(struct tpm_chip *chip);
void tpm_put_ops(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index f7f34b2aa981..e1a41b788f08 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -899,7 +899,7 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
if (rc != TPM2_RC_TESTING)
break;
- msleep(delay_msec);
+ tpm_msleep(delay_msec);
}
return rc;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index a4ac63a21d8a..8f0a98dea327 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -665,7 +665,7 @@ static const struct dev_pm_ops crb_pm = {
SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
};
-static struct acpi_device_id crb_device_ids[] = {
+static const struct acpi_device_id crb_device_ids[] = {
{"MSFT0101", 0},
{"", 0},
};
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index f01d083eced2..25f6e2665385 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -32,26 +32,70 @@
static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
-static struct vio_device_id tpm_ibmvtpm_device_table[] = {
+static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
{ "IBM,vtpm", "IBM,vtpm"},
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
/**
+ *
+ * ibmvtpm_send_crq_word - Send a CRQ request
+ * @vdev: vio device struct
+ * @w1: pre-constructed first word of tpm crq (second word is reserved)
+ *
+ * Return:
+ * 0 - Success
+ * Non-zero - Failure
+ */
+static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
+{
+ return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
+}
+
+/**
+ *
* ibmvtpm_send_crq - Send a CRQ request
*
* @vdev: vio device struct
- * @w1: first word
- * @w2: second word
+ * @valid: Valid field
+ * @msg: Type field
+ * @len: Length field
+ * @data: Data field
+ *
+ * The ibmvtpm crq is defined as follows:
+ *
+ * Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7
+ * -----------------------------------------------------------------------
+ * Word0 | Valid | Type | Length | Data
+ * -----------------------------------------------------------------------
+ * Word1 | Reserved
+ * -----------------------------------------------------------------------
+ *
+ * Which matches the following structure (on bigendian host):
+ *
+ * struct ibmvtpm_crq {
+ * u8 valid;
+ * u8 msg;
+ * __be16 len;
+ * __be32 data;
+ * __be64 reserved;
+ * } __attribute__((packed, aligned(8)));
+ *
+ * However, the value is passed in a register so just compute the numeric value
+ * to load into the register avoiding byteswap altogether. Endian only affects
+ * memory loads and stores - registers are internally represented the same.
*
* Return:
- * 0 -Sucess
+ * 0 (H_SUCCESS) - Success
* Non-zero - Failure
*/
-static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
+static int ibmvtpm_send_crq(struct vio_dev *vdev,
+ u8 valid, u8 msg, u16 len, u32 data)
{
- return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
+ u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
+ (u64)data;
+ return ibmvtpm_send_crq_word(vdev, w1);
}
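As the comment above explains, the CRQ word is assembled arithmetically so no byteswap is needed before handing it to the hypervisor. A minimal standalone sketch of the packing; the field values below are made up for illustration, not the driver's IBMVTPM_VALID_CMD/VTPM_TPM_COMMAND constants:

#include <stdint.h>
#include <stdio.h>

static uint64_t pack_crq_word(uint8_t valid, uint8_t msg, uint16_t len, uint32_t data)
{
	return ((uint64_t)valid << 56) | ((uint64_t)msg << 48) |
	       ((uint64_t)len << 32) | (uint64_t)data;
}

int main(void)
{
	/* e.g. valid=0x80, msg=0x01, a 10-byte command, DMA handle 0x1000 */
	printf("w1 = 0x%016llx\n",
	       (unsigned long long)pack_crq_word(0x80, 0x01, 10, 0x1000));
	return 0;   /* prints w1 = 0x8001000a00001000 */
}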
/**
@@ -109,8 +153,6 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
- struct ibmvtpm_crq crq;
- __be64 *word = (__be64 *)&crq;
int rc, sig;
if (!ibmvtpm->rtce_buf) {
@@ -137,10 +179,6 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
spin_lock(&ibmvtpm->rtce_lock);
ibmvtpm->res_len = 0;
memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
- crq.valid = (u8)IBMVTPM_VALID_CMD;
- crq.msg = (u8)VTPM_TPM_COMMAND;
- crq.len = cpu_to_be16(count);
- crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
/*
* set the processing flag before the Hcall, since we may get the
@@ -148,8 +186,9 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
*/
ibmvtpm->tpm_processing_cmd = true;
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
- be64_to_cpu(word[1]));
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
+ count, ibmvtpm->rtce_dma_handle);
if (rc != H_SUCCESS) {
dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
rc = 0;
@@ -182,15 +221,10 @@ static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
*/
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
{
- struct ibmvtpm_crq crq;
- u64 *buf = (u64 *) &crq;
int rc;
- crq.valid = (u8)IBMVTPM_VALID_CMD;
- crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;
-
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
- cpu_to_be64(buf[1]));
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
@@ -210,15 +244,10 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
*/
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{
- struct ibmvtpm_crq crq;
- u64 *buf = (u64 *) &crq;
int rc;
- crq.valid = (u8)IBMVTPM_VALID_CMD;
- crq.msg = (u8)VTPM_GET_VERSION;
-
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
- cpu_to_be64(buf[1]));
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"ibmvtpm_crq_get_version failed rc=%d\n", rc);
@@ -238,7 +267,7 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
{
int rc;
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
+ rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
@@ -258,7 +287,7 @@ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
int rc;
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
+ rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"ibmvtpm_crq_send_init failed rc=%d\n", rc);
@@ -340,15 +369,10 @@ static int tpm_ibmvtpm_suspend(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
- struct ibmvtpm_crq crq;
- u64 *buf = (u64 *) &crq;
int rc = 0;
- crq.valid = (u8)IBMVTPM_VALID_CMD;
- crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;
-
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
- cpu_to_be64(buf[1]));
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"tpm_ibmvtpm_suspend failed rc=%d\n", rc);
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 3b1b9f9322d5..d8f10047fbba 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -191,7 +191,7 @@ static int wait(struct tpm_chip *chip, int wait_for_bit)
/* check the status-register if wait_for_bit is set */
if (status & 1 << wait_for_bit)
break;
- msleep(TPM_MSLEEP_TIME);
+ tpm_msleep(TPM_MSLEEP_TIME);
}
if (i == TPM_MAX_TRIES) { /* timeout occurs */
if (wait_for_bit == STAT_XFE)
@@ -226,7 +226,7 @@ static void tpm_wtx(struct tpm_chip *chip)
wait_and_send(chip, TPM_CTRL_WTX);
wait_and_send(chip, 0x00);
wait_and_send(chip, 0x00);
- msleep(TPM_WTX_MSLEEP_TIME);
+ tpm_msleep(TPM_WTX_MSLEEP_TIME);
}
static void tpm_wtx_abort(struct tpm_chip *chip)
@@ -237,7 +237,7 @@ static void tpm_wtx_abort(struct tpm_chip *chip)
wait_and_send(chip, 0x00);
wait_and_send(chip, 0x00);
number_of_wtx = 0;
- msleep(TPM_WTX_MSLEEP_TIME);
+ tpm_msleep(TPM_WTX_MSLEEP_TIME);
}
static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count)
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index b617b2eeb080..63bc6c3b949e 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -51,7 +51,7 @@ static int wait_startup(struct tpm_chip *chip, int l)
if (access & TPM_ACCESS_VALID)
return 0;
- msleep(TPM_TIMEOUT);
+ tpm_msleep(TPM_TIMEOUT);
} while (time_before(jiffies, stop));
return -1;
}
@@ -117,7 +117,7 @@ again:
do {
if (check_locality(chip, l))
return l;
- msleep(TPM_TIMEOUT);
+ tpm_msleep(TPM_TIMEOUT);
} while (time_before(jiffies, stop));
}
return -1;
@@ -164,7 +164,7 @@ static int get_burstcount(struct tpm_chip *chip)
burstcnt = (value >> 8) & 0xFFFF;
if (burstcnt)
return burstcnt;
- msleep(TPM_TIMEOUT);
+ tpm_msleep(TPM_TIMEOUT);
} while (time_before(jiffies, stop));
return -EBUSY;
}
@@ -396,7 +396,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
priv->irq = irq;
chip->flags |= TPM_CHIP_FLAG_IRQ;
if (!priv->irq_tested)
- msleep(1);
+ tpm_msleep(1);
if (!priv->irq_tested)
disable_interrupts(chip);
priv->irq_tested = true;
diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c
index c834f5abfc49..4c10456f8a32 100644
--- a/drivers/clk/clk-bulk.c
+++ b/drivers/clk/clk-bulk.c
@@ -105,6 +105,7 @@ err:
return ret;
}
+EXPORT_SYMBOL_GPL(clk_bulk_prepare);
#endif /* CONFIG_HAVE_CLK_PREPARE */
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
index 62d7854e4b87..5970a50671b9 100644
--- a/drivers/clk/rockchip/clk-rk3128.c
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -315,13 +315,13 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(10), 8, GFLAGS),
GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(10), 0, GFLAGS),
GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(10), 1, GFLAGS),
GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(10), 2, GFLAGS),
GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(2), 15, GFLAGS),
COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
@@ -541,7 +541,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS),
GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS),
- GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(0, "pclk_pmu", "pclk_pmu_pre", 0, RK2928_CLKGATE_CON(9), 2, GFLAGS),
GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS),
/* PD_MMC */
@@ -577,6 +577,8 @@ static const char *const rk3128_critical_clocks[] __initconst = {
"aclk_peri",
"hclk_peri",
"pclk_peri",
+ "pclk_pmu",
+ "sclk_timer5",
};
static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np)
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index e40b77583c47..d8d3cb67b402 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -294,6 +294,18 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = {
#define PLL_ENABLED (1 << 31)
#define PLL_LOCKED (1 << 29)
+static void exynos4_clk_enable_pll(u32 reg)
+{
+ u32 pll_con = readl(reg_base + reg);
+ pll_con |= PLL_ENABLED;
+ writel(pll_con, reg_base + reg);
+
+ while (!(pll_con & PLL_LOCKED)) {
+ cpu_relax();
+ pll_con = readl(reg_base + reg);
+ }
+}
+
static void exynos4_clk_wait_for_pll(u32 reg)
{
u32 pll_con;
@@ -315,6 +327,9 @@ static int exynos4_clk_suspend(void)
samsung_clk_save(reg_base, exynos4_save_pll,
ARRAY_SIZE(exynos4_clk_pll_regs));
+ exynos4_clk_enable_pll(EPLL_CON0);
+ exynos4_clk_enable_pll(VPLL_CON0);
+
if (exynos4_soc == EXYNOS4210) {
samsung_clk_save(reg_base, exynos4_save_soc,
ARRAY_SIZE(exynos4210_clk_save));
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index a1df588343f2..1de8cac99a0e 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -117,7 +117,8 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
/* Turn off the clock (and clear the event) */
disable_timer(cs5535_event_clock);
- if (clockevent_state_shutdown(&cs5535_clockevent))
+ if (clockevent_state_detached(&cs5535_clockevent) ||
+ clockevent_state_shutdown(&cs5535_clockevent))
return IRQ_HANDLED;
/* Clear the counter */
diff --git a/drivers/clocksource/numachip.c b/drivers/clocksource/numachip.c
index 6a20dc8b253f..9a7d7f0f23fe 100644
--- a/drivers/clocksource/numachip.c
+++ b/drivers/clocksource/numachip.c
@@ -43,7 +43,7 @@ static int numachip2_set_next_event(unsigned long delta, struct clock_event_devi
return 0;
}
-static struct clock_event_device numachip2_clockevent = {
+static const struct clock_event_device numachip2_clockevent __initconst = {
.name = "numachip2",
.rating = 400,
.set_next_event = numachip2_set_next_event,
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index 2ff64d9d4fb3..62d24690ba02 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -36,8 +36,8 @@ static u64 notrace integrator_read_sched_clock(void)
return -readl(sched_clk_base + TIMER_VALUE);
}
-static int integrator_clocksource_init(unsigned long inrate,
- void __iomem *base)
+static int __init integrator_clocksource_init(unsigned long inrate,
+ void __iomem *base)
{
u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
unsigned long rate = inrate;
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index a020da7940d6..a753c50e9e41 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -106,6 +106,22 @@ static const struct of_device_id whitelist[] __initconst = {
* platforms using "operating-points-v2" property.
*/
static const struct of_device_id blacklist[] __initconst = {
+ { .compatible = "calxeda,highbank", },
+ { .compatible = "calxeda,ecx-2000", },
+
+ { .compatible = "marvell,armadaxp", },
+
+ { .compatible = "nvidia,tegra124", },
+
+ { .compatible = "st,stih407", },
+ { .compatible = "st,stih410", },
+
+ { .compatible = "sigma,tango4", },
+
+ { .compatible = "ti,am33xx", },
+ { .compatible = "ti,am43", },
+ { .compatible = "ti,dra7", },
+
{ }
};
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index b29cd3398463..4bf47de6101f 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -190,7 +190,7 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data)
static const struct of_device_id ti_cpufreq_of_match[] = {
{ .compatible = "ti,am33xx", .data = &am3x_soc_data, },
- { .compatible = "ti,am4372", .data = &am4x_soc_data, },
+ { .compatible = "ti,am43", .data = &am4x_soc_data, },
{ .compatible = "ti,dra7", .data = &dra7_soc_data },
{},
};
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 7080c384ad5d..52a75053ee03 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -104,13 +104,13 @@ static int __init arm_idle_init(void)
ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
if (ret <= 0) {
ret = ret ? : -ENODEV;
- goto out_fail;
+ goto init_fail;
}
ret = cpuidle_register_driver(drv);
if (ret) {
pr_err("Failed to register cpuidle driver\n");
- goto out_fail;
+ goto init_fail;
}
/*
@@ -149,6 +149,8 @@ static int __init arm_idle_init(void)
}
return 0;
+init_fail:
+ kfree(drv);
out_fail:
while (--cpu >= 0) {
dev = per_cpu(cpuidle_devices, cpu);
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index d9fbbf01062b..0f9754e07719 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx {
/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;
-static struct dentry *dbgfs_root;
-
#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
@@ -2984,6 +2982,8 @@ struct dbgfs_u32 {
char *desc;
};
+static struct dentry *dbgfs_root;
+
static void artpec6_crypto_init_debugfs(void)
{
dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index e36aeacd7635..1eb852765469 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,6 +1,7 @@
config CRYPTO_DEV_FSL_CAAM
tristate "Freescale CAAM-Multicore driver backend"
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+ select SOC_BUS
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -141,10 +142,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
To compile this as a module, choose M here: the module
will be called caamrng.
-config CRYPTO_DEV_FSL_CAAM_IMX
- def_bool SOC_IMX6 || SOC_IMX7D
- depends on CRYPTO_DEV_FSL_CAAM
-
config CRYPTO_DEV_FSL_CAAM_DEBUG
bool "Enable debug output in CAAM driver"
depends on CRYPTO_DEV_FSL_CAAM
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index dacb53fb690e..027e121c6f70 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/sys_soc.h>
#include "compat.h"
#include "regs.h"
@@ -19,6 +20,8 @@ bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
#ifdef CONFIG_CAAM_QI
#include "qi.h"
@@ -28,19 +31,11 @@ EXPORT_SYMBOL(caam_dpaa2);
* i.MX targets tend to have clock control subsystems that can
* enable/disable clocking to our device.
*/
-#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
static inline struct clk *caam_drv_identify_clk(struct device *dev,
char *clk_name)
{
- return devm_clk_get(dev, clk_name);
+ return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
}
-#else
-static inline struct clk *caam_drv_identify_clk(struct device *dev,
- char *clk_name)
-{
- return NULL;
-}
-#endif
/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
@@ -430,6 +425,10 @@ static int caam_probe(struct platform_device *pdev)
{
int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
u64 caam_id;
+ static const struct soc_device_attribute imx_soc[] = {
+ {.family = "Freescale i.MX"},
+ {},
+ };
struct device *dev;
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
@@ -451,6 +450,8 @@ static int caam_probe(struct platform_device *pdev)
dev_set_drvdata(dev, ctrlpriv);
nprop = pdev->dev.of_node;
+ caam_imx = (bool)soc_device_match(imx_soc);
+
/* Enable clocking */
clk = caam_drv_identify_clk(&pdev->dev, "ipg");
if (IS_ERR(clk)) {
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 2b5efff9ec3c..17cfd23a38fa 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -67,6 +67,7 @@
*/
extern bool caam_little_end;
+extern bool caam_imx;
#define caam_to_cpu(len) \
static inline u##len caam##len ## _to_cpu(u##len val) \
@@ -154,13 +155,10 @@ static inline u64 rd_reg64(void __iomem *reg)
#else /* CONFIG_64BIT */
static inline void wr_reg64(void __iomem *reg, u64 data)
{
-#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
- if (caam_little_end) {
+ if (!caam_imx && caam_little_end) {
wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
wr_reg32((u32 __iomem *)(reg), data);
- } else
-#endif
- {
+ } else {
wr_reg32((u32 __iomem *)(reg), data >> 32);
wr_reg32((u32 __iomem *)(reg) + 1, data);
}
@@ -168,41 +166,40 @@ static inline void wr_reg64(void __iomem *reg, u64 data)
static inline u64 rd_reg64(void __iomem *reg)
{
-#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
- if (caam_little_end)
+ if (!caam_imx && caam_little_end)
return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
(u64)rd_reg32((u32 __iomem *)(reg)));
- else
-#endif
- return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
- (u64)rd_reg32((u32 __iomem *)(reg) + 1));
+
+ return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
+ (u64)rd_reg32((u32 __iomem *)(reg) + 1));
}
#endif /* CONFIG_64BIT */
+static inline u64 cpu_to_caam_dma64(dma_addr_t value)
+{
+ if (caam_imx)
+ return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
+ (u64)cpu_to_caam32(upper_32_bits(value)));
+
+ return cpu_to_caam64(value);
+}
+
+static inline u64 caam_dma64_to_cpu(u64 value)
+{
+ if (caam_imx)
+ return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
+ (u64)caam32_to_cpu(upper_32_bits(value)));
+
+ return caam64_to_cpu(value);
+}
+
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-#ifdef CONFIG_SOC_IMX7D
-#define cpu_to_caam_dma(value) \
- (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
- (u64)cpu_to_caam32(upper_32_bits(value)))
-#define caam_dma_to_cpu(value) \
- (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
- (u64)caam32_to_cpu(upper_32_bits(value)))
-#else
-#define cpu_to_caam_dma(value) cpu_to_caam64(value)
-#define caam_dma_to_cpu(value) caam64_to_cpu(value)
-#endif /* CONFIG_SOC_IMX7D */
+#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
+#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
#else
#define cpu_to_caam_dma(value) cpu_to_caam32(value)
#define caam_dma_to_cpu(value) caam32_to_cpu(value)
-#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
-
-#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
-#define cpu_to_caam_dma64(value) \
- (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
- (u64)cpu_to_caam32(upper_32_bits(value)))
-#else
-#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
-#endif
+#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
/*
* jr_outentry
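On i.MX the two 32-bit halves of a 64-bit DMA address reach the hardware in the opposite order from a native 64-bit store, which is what cpu_to_caam_dma64()/caam_dma64_to_cpu() above encode. A minimal sketch of the half-swap alone, leaving out the caam32/caam64 endian accessors:

#include <stdint.h>

static uint64_t imx_swap_halves(uint64_t value)
{
	uint32_t lo = (uint32_t)value;
	uint32_t hi = (uint32_t)(value >> 32);

	/* low half lands in the upper word, high half in the lower word */
	return ((uint64_t)lo << 32) | hi;
}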
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index d2207ac5ba19..5438552bc6d7 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -386,7 +386,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
struct skcipher_request req;
- struct safexcel_inv_result result = { 0 };
+ struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
memset(&req, 0, sizeof(struct skcipher_request));
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 3f819399cd95..3980f946874f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -419,7 +419,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
struct ahash_request req;
- struct safexcel_inv_result result = { 0 };
+ struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
memset(&req, 0, sizeof(struct ahash_request));
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index b585ce54a802..4835dd4a9e50 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct scatterlist sg[1], *tsg;
- int err = 0, len = 0, reg, ncp;
+ int err = 0, len = 0, reg, ncp = 0;
unsigned int i;
- const u32 *buffer = (const u32 *)rctx->buffer;
+ u32 *buffer = (void *)rctx->buffer;
rctx->sg = hdev->req->src;
rctx->total = hdev->req->nbytes;
@@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
reg |= HASH_CR_DMAA;
stm32_hash_write(hdev, HASH_CR, reg);
- for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++)
- stm32_hash_write(hdev, HASH_DIN, buffer[i]);
-
- stm32_hash_set_nblw(hdev, ncp);
+ if (ncp) {
+ memset(buffer + ncp, 0,
+ DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
+ writesl(hdev->io_base + HASH_DIN, buffer,
+ DIV_ROUND_UP(ncp, sizeof(u32)));
+ }
+ stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32)));
reg = stm32_hash_read(hdev, HASH_STR);
reg |= HASH_STR_DCAL;
stm32_hash_write(hdev, HASH_STR, reg);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 79791c690858..dff88838dce7 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1756,9 +1756,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
req_ctx->swinit = 0;
} else {
desc->ptr[1] = zero_entry;
- /* Indicate next op is not the first. */
- req_ctx->first = 0;
}
+ /* Indicate next op is not the first. */
+ req_ctx->first = 0;
/* HMAC key */
if (ctx->keylen)
@@ -1769,7 +1769,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
sg_count = edesc->src_nents ?: 1;
if (is_sec1 && sg_count > 1)
- sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
+ sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
else
sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
DMA_TO_DEVICE);
@@ -3057,7 +3057,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
t_alg->algt.alg.hash.final = ahash_final;
t_alg->algt.alg.hash.finup = ahash_finup;
t_alg->algt.alg.hash.digest = ahash_digest;
- t_alg->algt.alg.hash.setkey = ahash_setkey;
+ if (!strncmp(alg->cra_name, "hmac", 4))
+ t_alg->algt.alg.hash.setkey = ahash_setkey;
t_alg->algt.alg.hash.import = ahash_import;
t_alg->algt.alg.hash.export = ahash_export;
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 66fb40d0ebdb..03830634e141 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -383,7 +383,7 @@ err_put_fd:
return err;
}
-static void sync_fill_fence_info(struct dma_fence *fence,
+static int sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
@@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence,
test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
ktime_to_ns(fence->timestamp) :
ktime_set(0, 0);
+
+ return info->status;
}
static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
@@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
* sync_fence_info and return the actual number of fences on
* info->num_fences.
*/
- if (!info.num_fences)
+ if (!info.num_fences) {
+ info.status = dma_fence_is_signaled(sync_file->fence);
goto no_fences;
+ } else {
+ info.status = 1;
+ }
if (info.num_fences < num_fences)
return -EINVAL;
@@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (!fence_info)
return -ENOMEM;
- for (i = 0; i < num_fences; i++)
- sync_fill_fence_info(fences[i], &fence_info[i]);
+ for (i = 0; i < num_fences; i++) {
+ int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+ info.status = info.status <= 0 ? info.status : status;
+ }
if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
size)) {
@@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
no_fences:
sync_file_get_name(sync_file, info.name, sizeof(info.name));
- info.status = dma_fence_is_signaled(sync_file->fence);
info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
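The loop above folds the per-fence status into info.status: it stays at 1 only while every fence reports 1, and the first 0 or negative value sticks. A minimal standalone sketch of that fold, using a plain int array in place of dma_fence objects:

static int aggregate_status(const int *fence_status, int num_fences)
{
	int status = 1;
	int i;

	for (i = 0; i < num_fences; i++)
		status = status <= 0 ? status : fence_status[i];

	return status;
}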
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 32905d5606ac..55f9c62ee54b 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -212,11 +212,12 @@ struct msgdma_device {
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
struct msgdma_sw_desc *desc;
+ unsigned long flags;
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
list_del(&desc->node);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
INIT_LIST_HEAD(&desc->tx_list);
@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
struct msgdma_device *mdev = to_mdev(tx->chan);
struct msgdma_sw_desc *new;
dma_cookie_t cookie;
+ unsigned long flags;
new = tx_to_desc(tx);
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
cookie = dma_cookie_assign(tx);
list_add_tail(&new->node, &mdev->pending_list);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
return cookie;
}
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
struct msgdma_extended_desc *desc;
size_t copy;
u32 desc_cnt;
+ unsigned long irqflags;
desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) {
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL;
}
mdev->desc_free_cnt -= desc_cnt;
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
do {
/* Allocate and populate the descriptor */
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
u32 desc_cnt = 0, i;
struct scatterlist *sg;
u32 stride;
+ unsigned long irqflags;
for_each_sg(sgl, sg, sg_len, i)
desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) {
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL;
}
mdev->desc_free_cnt -= desc_cnt;
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
avail = sg_dma_len(sgl);
@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
static void msgdma_issue_pending(struct dma_chan *chan)
{
struct msgdma_device *mdev = to_mdev(chan);
+ unsigned long flags;
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
msgdma_start_transfer(mdev);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
}
/**
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
struct msgdma_device *mdev = to_mdev(dchan);
+ unsigned long flags;
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
msgdma_free_descriptors(mdev);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
kfree(mdev->sw_desq);
}
@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
u32 count;
u32 __maybe_unused size;
u32 __maybe_unused status;
+ unsigned long flags;
- spin_lock(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
/* Read number of responses that are available */
count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data)
* bits. So we need to just drop these values.
*/
size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
- status = ioread32(mdev->resp - MSGDMA_RESP_STATUS);
+ status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
msgdma_complete_descriptor(mdev);
msgdma_chan_desc_cleanup(mdev);
}
- spin_unlock(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
}
/**
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 3879f80a4815..a7ea20e7b8e9 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
struct edma_desc *edesc;
struct device *dev = chan->device->dev;
struct edma_chan *echan = to_edma_chan(chan);
- unsigned int width, pset_len;
+ unsigned int width, pset_len, array_size;
if (unlikely(!echan || !len))
return NULL;
+ /* Align the array size (acnt block) with the transfer properties */
+ switch (__ffs((src | dest | len))) {
+ case 0:
+ array_size = SZ_32K - 1;
+ break;
+ case 1:
+ array_size = SZ_32K - 2;
+ break;
+ default:
+ array_size = SZ_32K - 4;
+ break;
+ }
+
if (len < SZ_64K) {
/*
* Transfer size less than 64K can be handled with one paRAM
@@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
* When the full_length is a multiple of 32767, one slot can be
* used to complete the transfer.
*/
- width = SZ_32K - 1;
+ width = array_size;
pset_len = rounddown(len, width);
/* One slot is enough for lengths multiple of (SZ_32K -1) */
if (unlikely(pset_len == len))
@@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
}
dest += pset_len;
src += pset_len;
- pset_len = width = len % (SZ_32K - 1);
+ pset_len = width = len % array_size;
ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
width, pset_len, DMA_MEM_TO_MEM);
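The switch on __ffs((src | dest | len)) above picks the largest array size below 32K that still matches the common low-bit alignment of the addresses and length. A minimal standalone sketch of the same selection, using __builtin_ctz() in place of the kernel's __ffs() and 32768 in place of SZ_32K:

#include <stdint.h>

#define SZ_32K 32768u

/* mirrors __ffs(); the argument must be nonzero (len is checked earlier) */
static unsigned int pick_array_size(uint32_t src, uint32_t dest, uint32_t len)
{
	switch (__builtin_ctz(src | dest | len)) {
	case 0:
		return SZ_32K - 1;   /* only byte alignment in common */
	case 1:
		return SZ_32K - 2;   /* 16-bit aligned */
	default:
		return SZ_32K - 4;   /* 32-bit or better */
	}
}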
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 2f65a8fde21d..f1d04b70ee67 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
mutex_lock(&xbar->mutex);
map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
xbar->dma_requests);
- mutex_unlock(&xbar->mutex);
if (map->xbar_out == xbar->dma_requests) {
+ mutex_unlock(&xbar->mutex);
dev_err(&pdev->dev, "Run out of free DMA requests\n");
kfree(map);
return ERR_PTR(-ENOMEM);
}
set_bit(map->xbar_out, xbar->dma_inuse);
+ mutex_unlock(&xbar->mutex);
map->xbar_in = (u16)dma_spec->args[0];
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 08629ee69d11..00e73d28077c 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -361,12 +361,12 @@ static const struct fpga_manager_ops altera_cvp_ops = {
.write_complete = altera_cvp_write_complete,
};
-static ssize_t show_chkcfg(struct device_driver *dev, char *buf)
+static ssize_t chkcfg_show(struct device_driver *dev, char *buf)
{
return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
}
-static ssize_t store_chkcfg(struct device_driver *drv, const char *buf,
+static ssize_t chkcfg_store(struct device_driver *drv, const char *buf,
size_t count)
{
int ret;
@@ -378,7 +378,7 @@ static ssize_t store_chkcfg(struct device_driver *drv, const char *buf,
return count;
}
-static DRIVER_ATTR(chkcfg, 0600, show_chkcfg, store_chkcfg);
+static DRIVER_ATTR_RW(chkcfg);
static int altera_cvp_probe(struct pci_dev *pdev,
const struct pci_device_id *dev_id);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3388d54ba114..3f80f167ed56 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -453,7 +453,8 @@ config GPIO_TS4800
config GPIO_THUNDERX
tristate "Cavium ThunderX/OCTEON-TX GPIO"
depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
- depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY
+ depends on PCI_MSI
+ select IRQ_DOMAIN_HIERARCHY
select IRQ_FASTEOI_HIERARCHY_HANDLERS
help
Say yes here to support the on-chip GPIO lines on the ThunderX
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index dbf869fb63ce..3233b72b6828 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
irq_set_handler_locked(d, handle_level_irq);
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
- irq_set_handler_locked(d, handle_edge_irq);
+ /*
+ * Edge IRQs are already cleared/acked in irq_handler and
+ * do not need to be masked. As a result, the handle_edge_irq()
+ * logic is excessive here and may cause loss of interrupts.
+ * So just use handle_simple_irq.
+ */
+ irq_set_handler_locked(d, handle_simple_irq);
return 0;
@@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{
void __iomem *isr_reg = NULL;
- u32 isr;
+ u32 enabled, isr, level_mask;
unsigned int bit;
struct gpio_bank *bank = gpiobank;
unsigned long wa_lock_flags;
@@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
pm_runtime_get_sync(bank->chip.parent);
while (1) {
- u32 isr_saved, level_mask = 0;
- u32 enabled;
-
raw_spin_lock_irqsave(&bank->lock, lock_flags);
enabled = omap_get_gpio_irqbank_mask(bank);
- isr_saved = isr = readl_relaxed(isr_reg) & enabled;
+ isr = readl_relaxed(isr_reg) & enabled;
if (bank->level_mask)
level_mask = bank->level_mask & enabled;
+ else
+ level_mask = 0;
/* clear edge sensitive interrupts before handler(s) are
called so that we don't miss any interrupt occurred while
executing them */
- omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
- omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
- omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
+ if (isr & ~level_mask)
+ omap_clear_gpio_irqbank(bank, isr & ~level_mask);
raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
@@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
/*---------------------------------------------------------------------*/
-static void __init omap_gpio_show_rev(struct gpio_bank *bank)
+static void omap_gpio_show_rev(struct gpio_bank *bank)
{
static bool called;
u32 rev;
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 4d2113530735..eb4528c87c0b 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
if (pin <= 255) {
char ev_name[5];
- sprintf(ev_name, "_%c%02X",
+ sprintf(ev_name, "_%c%02hhX",
agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
pin);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 7ef6c28a34d9..bc746131987f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
if (unlikely(r))
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index d228f5a99044..dbbe986f90f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -636,7 +636,194 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
NUM_BANKS(ADDR_SURF_2_BANK);
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
- } else if (adev->asic_type == CHIP_OLAND || adev->asic_type == CHIP_HAINAN) {
+ } else if (adev->asic_type == CHIP_OLAND) {
+ tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ tilemode[8] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1);
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
+ } else if (adev->asic_type == CHIP_HAINAN) {
tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index e4a8c2e52cb2..660b3fbade41 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -892,6 +892,8 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
int err = 0;
dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
dev->kfd2kgd->get_tile_config(dev->kgd, &config);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 5979158c3f7b..944abfad39c1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -292,7 +292,10 @@ static int create_signal_event(struct file *devkfd,
struct kfd_event *ev)
{
if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
- pr_warn("Signal event wasn't created because limit was reached\n");
+ if (!p->signal_event_limit_reached) {
+ pr_warn("Signal event wasn't created because limit was reached\n");
+ p->signal_event_limit_reached = true;
+ }
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 681b639f5133..ed71ad40e8f7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -183,8 +183,8 @@ static void uninitialize(struct kernel_queue *kq)
{
if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
kq->mqd->destroy_mqd(kq->mqd,
- NULL,
- false,
+ kq->queue->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
kq->queue->pipe,
kq->queue->queue);
@@ -210,6 +210,11 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
uint32_t wptr, rptr;
unsigned int *queue_address;
+ /* When rptr == wptr, the buffer is empty.
+ * When rptr == wptr + 1, the buffer is full.
+ * It is always rptr that advances toward wptr, never the other way
+ * around, so we can only use up to queue_size_dwords - 1 dwords.
+ */
rptr = *kq->rptr_kernel;
wptr = *kq->wptr_kernel;
queue_address = (unsigned int *)kq->pq_kernel_addr;
@@ -219,11 +224,10 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
pr_debug("wptr: %d\n", wptr);
pr_debug("queue_address 0x%p\n", queue_address);
- available_size = (rptr - 1 - wptr + queue_size_dwords) %
+ available_size = (rptr + queue_size_dwords - 1 - wptr) %
queue_size_dwords;
- if (packet_size_in_dwords >= queue_size_dwords ||
- packet_size_in_dwords >= available_size) {
+ if (packet_size_in_dwords > available_size) {
/*
* make sure calling functions know
* acquire_packet_buffer() failed
@@ -233,6 +237,14 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
}
if (wptr + packet_size_in_dwords >= queue_size_dwords) {
+ /* make sure after rolling back to position 0, there is
+ * still enough space.
+ */
+ if (packet_size_in_dwords >= rptr) {
+ *buffer_ptr = NULL;
+ return -ENOMEM;
+ }
+ /* fill nops, roll back and start at position 0 */
while (wptr > 0) {
queue_address[wptr] = kq->nop_packet;
wptr = (wptr + 1) % queue_size_dwords;
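With the "rptr == wptr means empty" convention described above, one slot always stays unused, so the usable space is (rptr + queue_size_dwords - 1 - wptr) % queue_size_dwords. A minimal standalone check of that formula with made-up pointer values:

#include <stdio.h>

static unsigned int available_dwords(unsigned int rptr, unsigned int wptr,
				     unsigned int queue_size_dwords)
{
	return (rptr + queue_size_dwords - 1 - wptr) % queue_size_dwords;
}

int main(void)
{
	printf("%u\n", available_dwords(0, 0, 1024));   /* empty ring -> 1023 usable */
	printf("%u\n", available_dwords(5, 4, 1024));   /* wptr right behind rptr -> 0 */
	return 0;
}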
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index b397ec726400..b87e96cee5fa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -521,6 +521,7 @@ struct kfd_process {
struct list_head signal_event_pages;
u32 next_nonsignal_event_id;
size_t signal_event_count;
+ bool signal_event_limit_reached;
};
/**
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 1cae95e2b13a..03bec765b03d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -143,7 +143,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
int num_queues = 0;
struct queue *cur;
- memset(&q_properties, 0, sizeof(struct queue_properties));
memcpy(&q_properties, properties, sizeof(struct queue_properties));
q = NULL;
kq = NULL;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 97c94f9683fa..38cea6fb25a8 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity)
{
struct amd_sched_rq *rq = entity->rq;
- int r;
if (!amd_sched_entity_is_initialized(sched, entity))
return;
+
/**
* The client will not queue more IBs during this fini, consume existing
- * queued IBs or discard them on SIGKILL
+ * queued IBs
*/
- if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
- r = -ERESTARTSYS;
- else
- r = wait_event_killable(sched->job_scheduled,
- amd_sched_entity_is_idle(entity));
- amd_sched_rq_remove_entity(rq, entity);
- if (r) {
- struct amd_sched_job *job;
+ wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
- /* Park the kernel for a moment to make sure it isn't processing
- * our entity.
- */
- kthread_park(sched->thread);
- kthread_unpark(sched->thread);
- while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
- sched->ops->free_job(job);
-
- }
+ amd_sched_rq_remove_entity(rq, entity);
kfifo_free(&entity->job_queue);
}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4e53aae9a1fb..0028591f3f95 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2960,6 +2960,7 @@ out:
drm_modeset_backoff(&ctx);
}
+ drm_atomic_state_put(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5a634594a6ce..57881167ccd2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -551,12 +551,15 @@ static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct etnaviv_drm_private *priv = obj->dev->dev_private;
struct etnaviv_vram_mapping *mapping, *tmp;
/* object should not be active */
WARN_ON(is_active(etnaviv_obj));
+ mutex_lock(&priv->gem_lock);
list_del(&etnaviv_obj->gem_node);
+ mutex_unlock(&priv->gem_lock);
list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
obj_node) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 026ef4e02f85..46dfe0737f43 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -445,8 +445,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
cmdbuf->user_size = ALIGN(args->stream_size, 8);
ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
- if (ret == 0)
- cmdbuf = NULL;
+ if (ret)
+ goto out;
+
+ cmdbuf = NULL;
if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
/*
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 730b8d9db187..6be5b53c3b27 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
+#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index b1f7299600f0..82b72425a42f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -168,23 +168,21 @@ static struct drm_driver exynos_drm_driver = {
static int exynos_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
+ struct exynos_drm_private *private;
if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
- drm_connector_list_iter_begin(drm_dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- int old_dpms = connector->dpms;
-
- if (connector->funcs->dpms)
- connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+ private = drm_dev->dev_private;
- /* Set the old mode back to the connector for resume */
- connector->dpms = old_dpms;
+ drm_kms_helper_poll_disable(drm_dev);
+ exynos_drm_fbdev_suspend(drm_dev);
+ private->suspend_state = drm_atomic_helper_suspend(drm_dev);
+ if (IS_ERR(private->suspend_state)) {
+ exynos_drm_fbdev_resume(drm_dev);
+ drm_kms_helper_poll_enable(drm_dev);
+ return PTR_ERR(private->suspend_state);
}
- drm_connector_list_iter_end(&conn_iter);
return 0;
}
@@ -192,22 +190,15 @@ static int exynos_drm_suspend(struct device *dev)
static int exynos_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
+ struct exynos_drm_private *private;
if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
- drm_connector_list_iter_begin(drm_dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->funcs->dpms) {
- int dpms = connector->dpms;
-
- connector->dpms = DRM_MODE_DPMS_OFF;
- connector->funcs->dpms(connector, dpms);
- }
- }
- drm_connector_list_iter_end(&conn_iter);
+ private = drm_dev->dev_private;
+ drm_atomic_helper_resume(drm_dev, private->suspend_state);
+ exynos_drm_fbdev_resume(drm_dev);
+ drm_kms_helper_poll_enable(drm_dev);
return 0;
}
@@ -439,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev)
kfree(drm->dev_private);
drm->dev_private = NULL;
+ dev_set_drvdata(dev, NULL);
drm_dev_unref(drm);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index cf131c2aa23e..f8bae4cb4823 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -202,6 +202,7 @@ struct drm_exynos_file_private {
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
+ struct drm_atomic_state *suspend_state;
struct device *dma_dev;
void *mapping;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index c3a068409b48..dfb66ecf417b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -18,6 +18,8 @@
#include <drm/drm_crtc_helper.h>
#include <drm/exynos_drm.h>
+#include <linux/console.h>
+
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
@@ -285,3 +287,21 @@ void exynos_drm_output_poll_changed(struct drm_device *dev)
drm_fb_helper_hotplug_event(fb_helper);
}
+
+void exynos_drm_fbdev_suspend(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+
+ console_lock();
+ drm_fb_helper_set_suspend(private->fb_helper, 1);
+ console_unlock();
+}
+
+void exynos_drm_fbdev_resume(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+
+ console_lock();
+ drm_fb_helper_set_suspend(private->fb_helper, 0);
+ console_unlock();
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index 330eef87f718..645d1bb7f665 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -21,6 +21,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev);
void exynos_drm_fbdev_fini(struct drm_device *dev);
void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
void exynos_drm_output_poll_changed(struct drm_device *dev);
+void exynos_drm_fbdev_suspend(struct drm_device *drm);
+void exynos_drm_fbdev_resume(struct drm_device *drm);
#else
@@ -39,6 +41,14 @@ static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
#define exynos_drm_output_poll_changed (NULL)
+static inline void exynos_drm_fbdev_suspend(struct drm_device *drm)
+{
+}
+
+static inline void exynos_drm_fbdev_resume(struct drm_device *drm)
+{
+}
+
#endif
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 214fa5e51963..0109ff40b1db 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -944,22 +944,27 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct drm_display_mode *m;
+ struct drm_connector_list_iter conn_iter;
int mode_ok;
drm_mode_set_crtcinfo(adjusted_mode, 0);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->encoder == encoder)
break;
}
+ if (connector)
+ drm_connector_get(connector);
+ drm_connector_list_iter_end(&conn_iter);
- if (connector->encoder != encoder)
+ if (!connector)
return true;
mode_ok = hdmi_mode_valid(connector, adjusted_mode);
if (mode_ok == MODE_OK)
- return true;
+ goto cleanup;
/*
* Find the most suitable mode and copy it to adjusted_mode.
@@ -979,6 +984,9 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
}
}
+cleanup:
+ drm_connector_put(connector);
+
return true;
}
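
The hdmi_mode_fixup() change above is an instance of the iterator-plus-reference pattern: take a reference on the connector found while iterating so it stays valid after the iterator is ended, and drop it when done. A sketch using the same DRM core helpers (the lookup condition and function name are illustrative):

static struct drm_connector *
find_connector_for_encoder(struct drm_device *dev, struct drm_encoder *encoder)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector, *found = NULL;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->encoder == encoder) {
			found = connector;
			drm_connector_get(found);	/* hold a reference */
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return found;	/* caller drops it with drm_connector_put() */
}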
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 40af17ec6312..ff3154fe6588 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -197,78 +197,65 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- unsigned int bar_index =
- (rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
u32 new = *(u32 *)(p_data);
bool lo = IS_ALIGNED(offset, 8);
u64 size;
int ret = 0;
bool mmio_enabled =
vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
+ struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;
- if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
- return -EINVAL;
-
+ /*
+ * Power-up software can determine how much address
+ * space the device requires by writing a value of
+ * all 1's to the register and then reading the value
+ * back. The device will return 0's in all don't-care
+ * address bits.
+ */
if (new == 0xffffffff) {
- /*
- * Power-up software can determine how much address
- * space the device requires by writing a value of
- * all 1's to the register and then reading the value
- * back. The device will return 0's in all don't-care
- * address bits.
- */
- size = vgpu->cfg_space.bar[bar_index].size;
- if (lo) {
- new = rounddown(new, size);
- } else {
- u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
- /* for 32bit mode bar it returns all-0 in upper 32
- * bit, for 64bit mode bar it will calculate the
- * size with lower 32bit and return the corresponding
- * value
+ switch (offset) {
+ case PCI_BASE_ADDRESS_0:
+ case PCI_BASE_ADDRESS_1:
+ size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1);
+ intel_vgpu_write_pci_bar(vgpu, offset,
+ size >> (lo ? 0 : 32), lo);
+ /*
+ * Untrap the BAR, since guest hasn't configured a
+ * valid GPA
*/
- if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
- new &= (~(size-1)) >> 32;
- else
- new = 0;
- }
- /*
- * Unmap & untrap the BAR, since guest hasn't configured a
- * valid GPA
- */
- switch (bar_index) {
- case INTEL_GVT_PCI_BAR_GTTMMIO:
ret = trap_gttmmio(vgpu, false);
break;
- case INTEL_GVT_PCI_BAR_APERTURE:
+ case PCI_BASE_ADDRESS_2:
+ case PCI_BASE_ADDRESS_3:
+ size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size - 1);
+ intel_vgpu_write_pci_bar(vgpu, offset,
+ size >> (lo ? 0 : 32), lo);
ret = map_aperture(vgpu, false);
break;
+ default:
+ /* Unimplemented BARs */
+ intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false);
}
- intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
} else {
- /*
- * Unmap & untrap the old BAR first, since guest has
- * re-configured the BAR
- */
- switch (bar_index) {
- case INTEL_GVT_PCI_BAR_GTTMMIO:
- ret = trap_gttmmio(vgpu, false);
+ switch (offset) {
+ case PCI_BASE_ADDRESS_0:
+ case PCI_BASE_ADDRESS_1:
+ /*
+ * Untrap the old BAR first, since guest has
+ * re-configured the BAR
+ */
+ trap_gttmmio(vgpu, false);
+ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ ret = trap_gttmmio(vgpu, mmio_enabled);
break;
- case INTEL_GVT_PCI_BAR_APERTURE:
- ret = map_aperture(vgpu, false);
+ case PCI_BASE_ADDRESS_2:
+ case PCI_BASE_ADDRESS_3:
+ map_aperture(vgpu, false);
+ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ ret = map_aperture(vgpu, mmio_enabled);
break;
- }
- intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
- /* Track the new BAR */
- if (mmio_enabled) {
- switch (bar_index) {
- case INTEL_GVT_PCI_BAR_GTTMMIO:
- ret = trap_gttmmio(vgpu, true);
- break;
- case INTEL_GVT_PCI_BAR_APERTURE:
- ret = map_aperture(vgpu, true);
- break;
- }
+ default:
+ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
}
}
return ret;
@@ -299,10 +286,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
}
switch (rounddown(offset, 4)) {
- case PCI_BASE_ADDRESS_0:
- case PCI_BASE_ADDRESS_1:
- case PCI_BASE_ADDRESS_2:
- case PCI_BASE_ADDRESS_3:
+ case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
@@ -344,7 +328,6 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
u16 *gmch_ctl;
- int i;
memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
info->cfg_space_size);
@@ -371,13 +354,13 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
*/
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8);
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
- for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
- vgpu->cfg_space.bar[i].size = pci_resource_len(
- gvt->dev_priv->drm.pdev, i * 2);
- vgpu->cfg_space.bar[i].tracked = false;
- }
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
+ pci_resource_len(gvt->dev_priv->drm.pdev, 0);
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
+ pci_resource_len(gvt->dev_priv->drm.pdev, 2);
}
/**
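
The comment retained above paraphrases the PCI BAR sizing handshake: software writes all 1's to the BAR, reads it back, and the device returns 0's in the don't-care address bits. A minimal sketch of that computation from the probing side (hypothetical callbacks, not the gvt code), assuming a 32-bit memory BAR:

static u32 probe_bar_size(u32 (*bar_read)(void *ctx),
			  void (*bar_write)(void *ctx, u32 v), void *ctx)
{
	u32 orig, val;

	orig = bar_read(ctx);
	bar_write(ctx, 0xffffffff);
	val = bar_read(ctx);
	bar_write(ctx, orig);		/* restore the programmed base */

	val &= ~0xfu;			/* drop the memory BAR flag bits */
	return ~val + 1;		/* decoded size in bytes */
}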
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 436377da41ba..03532dfc0cd5 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
- struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
- int ring_id;
-
kfree(vgpu->sched_data);
vgpu->sched_data = NULL;
-
- spin_lock_bh(&scheduler->mmio_context_lock);
- for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
- if (scheduler->engine_owner[ring_id] == vgpu) {
- intel_gvt_switch_mmio(vgpu, NULL, ring_id);
- scheduler->engine_owner[ring_id] = NULL;
- }
- }
- spin_unlock_bh(&scheduler->mmio_context_lock);
}
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
+ int ring_id;
gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
scheduler->need_reschedule = true;
scheduler->current_vgpu = NULL;
}
+
+ spin_lock_bh(&scheduler->mmio_context_lock);
+ for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+ if (scheduler->engine_owner[ring_id] == vgpu) {
+ intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+ scheduler->engine_owner[ring_id] = NULL;
+ }
+ }
+ spin_unlock_bh(&scheduler->mmio_context_lock);
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 19404c96eeb1..32e857dc507c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
if (READ_ONCE(obj->mm.pages))
return -ENODEV;
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ return -EFAULT;
+
/* Before the pages are instantiated the object is treated as being
* in the CPU domain. The pages will be clflushed as required before
* use, and we can freely write into the pages directly. If userspace
@@ -3013,10 +3016,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct drm_i915_gem_request *request)
{
+ unsigned long flags;
+
GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
dma_fence_set_error(&request->fence, -EIO);
- i915_gem_request_submit(request);
+
+ spin_lock_irqsave(&request->engine->timeline->lock, flags);
+ __i915_gem_request_submit(request);
intel_engine_init_global_seqno(request->engine, request->global_seqno);
+ spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
}
static void engine_set_wedged(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 4df039ef2ce3..e161d383b526 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,21 +33,20 @@
#include "intel_drv.h"
#include "i915_trace.h"
-static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *i915)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id) {
- struct intel_timeline *tl;
+ if (i915->gt.active_requests)
+ return false;
- tl = &ggtt->base.timeline.engine[engine->id];
- if (i915_gem_active_isset(&tl->last_request))
- return false;
- }
+ for_each_engine(engine, i915, id) {
+ if (engine->last_retired_context != i915->kernel_context)
+ return false;
+ }
- return true;
+ return true;
}
static int ggtt_flush(struct drm_i915_private *i915)
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
min_size, alignment, cache_level,
start, end, mode);
- /* Retire before we search the active list. Although we have
+ /*
+ * Retire before we search the active list. Although we have
* reasonable accuracy in our retirement lists, we may have
* a stray pin (preventing eviction) that can only be resolved by
* retiring.
@@ -182,7 +182,8 @@ search_again:
BUG_ON(ret);
}
- /* Can we unpin some objects such as idle hw contents,
+ /*
+ * Can we unpin some objects such as idle hw contents,
* or pending flips? But since only the GGTT has global entries
* such as scanouts, ringbuffers and contexts, we can skip the
* purge when inspecting per-process local address spaces.
@@ -190,19 +191,33 @@ search_again:
if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
return -ENOSPC;
- if (ggtt_is_idle(dev_priv)) {
- /* If we still have pending pageflip completions, drop
- * back to userspace to give our workqueues time to
- * acquire our locks and unpin the old scanouts.
- */
- return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
- }
+ /*
+ * Not everything in the GGTT is tracked via VMA using
+ * i915_vma_move_to_active(), otherwise we could evict as required
+ * with minimal stalling. Instead we are forced to idle the GPU and
+ * explicitly retire outstanding requests which will then remove
+ * the pinning for active objects such as contexts and ring,
+ * enabling us to evict them on the next iteration.
+ *
+ * To ensure that all user contexts are evictable, we perform
+ * a switch to the perma-pinned kernel context. This also gives
+ * us a termination condition: when the last retired context is
+ * the kernel's, there is nothing more we can evict.
+ */
+ if (!ggtt_is_idle(dev_priv)) {
+ ret = ggtt_flush(dev_priv);
+ if (ret)
+ return ret;
- ret = ggtt_flush(dev_priv);
- if (ret)
- return ret;
+ goto search_again;
+ }
- goto search_again;
+ /*
+ * If we still have pending pageflip completions, drop
+ * back to userspace to give our workqueues time to
+ * acquire our locks and unpin the old scanouts.
+ */
+ return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
found:
/* drm_mm doesn't allow any other operations while
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e21ce9c18b6e..b63893eeca73 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -839,7 +839,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
pipe);
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
- bool in_vbl = true;
unsigned long irqflags;
if (WARN_ON(!mode->crtc_clock)) {
@@ -922,8 +921,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
- in_vbl = position >= vbl_start && position < vbl_end;
-
/*
* While in vblank, position will be negative
* counting up towards 0 at vbl_end. And outside
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ed7cd9ee2c2a..c9bcc6c45012 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6998,6 +6998,7 @@ enum {
*/
#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
+#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14))
#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index d805b6e6fe71..27743be5b768 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
connector->encoder->base.id,
connector->encoder->name);
- /* ELD Conn_Type */
- connector->eld[5] &= ~(3 << 2);
- if (intel_crtc_has_dp_encoder(crtc_state))
- connector->eld[5] |= (1 << 2);
-
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
if (dev_priv->display.audio_codec_enable)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 183e87e8ea31..5d4cd3d00564 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1163,6 +1163,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+ if (port == PORT_A && is_dvi) {
+ DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
+ is_hdmi ? "/HDMI" : "");
+ is_dvi = false;
+ is_hdmi = false;
+ }
+
info->supports_dvi = is_dvi;
info->supports_hdmi = is_hdmi;
info->supports_dp = is_dp;
@@ -1233,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
{
enum port port;
- if (!HAS_DDI(dev_priv))
+ if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
return;
if (!dev_priv->vbt.child_dev_num)
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index ff9ecd211abb..b8315bca852b 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -74,7 +74,7 @@
#define I9XX_CSC_COEFF_1_0 \
((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
-static bool crtc_state_is_legacy(struct drm_crtc_state *state)
+static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
{
return !state->degamma_lut &&
!state->ctm &&
@@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
}
mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
- if (!crtc_state_is_legacy(state)) {
+ if (!crtc_state_is_legacy_gamma(state)) {
mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
(state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
}
@@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
- if (crtc_state_is_legacy(state)) {
+ if (crtc_state_is_legacy_gamma(state)) {
haswell_load_luts(state);
return;
}
@@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state)
glk_load_degamma_lut(state);
- if (crtc_state_is_legacy(state)) {
+ if (crtc_state_is_legacy_gamma(state)) {
haswell_load_luts(state);
return;
}
@@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
uint32_t i, lut_size;
uint32_t word0, word1;
- if (crtc_state_is_legacy(state)) {
+ if (crtc_state_is_legacy_gamma(state)) {
/* Turn off degamma/gamma on CGM block. */
I915_WRITE(CGM_PIPE_MODE(pipe),
(state->ctm ? CGM_PIPE_MODE_CSC : 0));
@@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc,
return 0;
/*
- * We also allow no degamma lut and a gamma lut at the legacy
+ * We also allow no degamma lut/ctm and a gamma lut at the legacy
* size (256 entries).
*/
- if (!crtc_state->degamma_lut &&
- crtc_state->gamma_lut &&
- crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
+ if (crtc_state_is_legacy_gamma(crtc_state))
return 0;
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 965988f79a55..92c1f8e166dc 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -216,7 +216,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
mask |= DC_STATE_DEBUG_MASK_CORES;
/* The below bit doesn't need to be cleared ever afterwards */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 4b4fd1f8110b..5e5fe03b638c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
int *n_entries)
{
if (IS_BROADWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
- return hsw_ddi_translations_fdi;
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
+ return bdw_ddi_translations_fdi;
} else if (IS_HASWELL(dev_priv)) {
*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
return hsw_ddi_translations_fdi;
@@ -1655,7 +1655,8 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
out:
if (ret && IS_GEN9_LP(dev_priv)) {
tmp = I915_READ(BXT_PHY_CTL(port));
- if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
+ if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
DRM_ERROR("Port %c enabled but PHY powered down? "
"(PHY_CTL %08x)\n", port_name(port), tmp);
@@ -2101,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
* register writes.
*/
val = I915_READ(DPCLKA_CFGCR0);
- val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) |
- DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port));
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
I915_WRITE(DPCLKA_CFGCR0, val);
} else if (IS_GEN9_BC(dev_priv)) {
/* DDI -> PLL mapping */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f17275519484..5c7828c52d12 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder;
struct drm_display_mode *mode;
struct intel_crtc_state *pipe_config;
- int htot = I915_READ(HTOTAL(cpu_transcoder));
- int hsync = I915_READ(HSYNC(cpu_transcoder));
- int vtot = I915_READ(VTOTAL(cpu_transcoder));
- int vsync = I915_READ(VSYNC(cpu_transcoder));
+ u32 htot, hsync, vtot, vsync;
enum pipe pipe = intel_crtc->pipe;
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
i9xx_crtc_clock_get(intel_crtc, pipe_config);
mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+ cpu_transcoder = pipe_config->cpu_transcoder;
+ htot = I915_READ(HTOTAL(cpu_transcoder));
+ hsync = I915_READ(HSYNC(cpu_transcoder));
+ vtot = I915_READ(VTOTAL(cpu_transcoder));
+ vsync = I915_READ(VSYNC(cpu_transcoder));
+
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
@@ -12359,7 +12363,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
struct intel_crtc_state *intel_cstate;
- bool hw_check = intel_state->modeset;
u64 put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
int i;
@@ -12376,7 +12379,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (needs_modeset(new_crtc_state) ||
to_intel_crtc_state(new_crtc_state)->update_pipe) {
- hw_check = true;
put_domains[to_intel_crtc(crtc)->pipe] =
modeset_get_crtc_power_domains(crtc,
@@ -14030,7 +14032,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
DRM_DEBUG_KMS("bad plane %d handle\n", i);
- return -EINVAL;
+ goto err;
}
stride_alignment = intel_fb_stride_alignment(fb, i);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 64134947c0aa..203198659ab2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
- intel_dp->panel_power_off_time = ktime_get_boottime();
wait_panel_off(intel_dp);
+ intel_dp->panel_power_off_time = ktime_get_boottime();
/* We got a reference when we enabled the VDD. */
intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
@@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
* seems sufficient to avoid this problem.
*/
if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
- vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10);
+ vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
vbt.t11_t12);
}
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 09b670929786..de38d014ed39 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -208,12 +208,6 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
},
};
-static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
-{
- return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
- BIT(phy_info->channel[DPIO_CH0].port);
-}
-
static const struct bxt_ddi_phy_info *
bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
{
@@ -313,7 +307,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info;
- enum port port;
phy_info = bxt_get_phy_info(dev_priv, phy);
@@ -335,19 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
return false;
}
- for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
- u32 tmp = I915_READ(BXT_PHY_CTL(port));
-
- if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
- "for port %c powered down "
- "(PHY_CTL %08x)\n",
- phy, port_name(port), tmp);
-
- return false;
- }
- }
-
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index a2a3d93d67bd..df808a94c511 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
/* 3. Configure DPLL_CFGCR0 */
/* Avoid touch CFGCR1 if HDMI mode is not enabled */
- if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) {
+ if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
val = pll->state.hw_state.cfgcr1;
I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
/* 4. Read back to ensure writes completed */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index f0c11aec5ea5..7442891762be 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -892,8 +892,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -903,15 +901,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
intel_panel_disable_backlight(old_conn_state);
/*
- * Disable Device ready before the port shutdown in order
- * to avoid split screen
- */
- if (IS_BROXTON(dev_priv)) {
- for_each_dsi_port(port, intel_dsi->ports)
- I915_WRITE(MIPI_DEVICE_READY(port), 0);
- }
-
- /*
* According to the spec we should send SHUTDOWN before
* MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing
* has shown that the v3 sequence works for v2 VBTs too
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 9ab596941372..3c2d9cf22ed5 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
}
/* WaProgramL3SqcReg1DefaultForPerf:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
- I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
- L3_HIGH_PRIO_CREDITS(2));
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+ u32 val = I915_READ(GEN8_L3SQCREG1);
+ val &= ~L3_PRIO_CREDITS_MASK;
+ val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+ I915_WRITE(GEN8_L3SQCREG1, val);
+ }
/* WaToEnableHwFixForPushConstHWBug:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 951e834dd274..28a778b785ac 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -30,6 +30,21 @@
#include "intel_drv.h"
#include "i915_drv.h"
+static void intel_connector_update_eld_conn_type(struct drm_connector *connector)
+{
+ u8 conn_type;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ conn_type = DRM_ELD_CONN_TYPE_DP;
+ } else {
+ conn_type = DRM_ELD_CONN_TYPE_HDMI;
+ }
+
+ connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK;
+ connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type;
+}
+
/**
* intel_connector_update_modes - update connector from edid
* @connector: DRM connector device to use
@@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector,
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
+ intel_connector_update_eld_conn_type(connector);
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a17b1de7d7e0..3b1c5d783ee7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1699,6 +1699,8 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = bxt_get_backlight(connector);
val = intel_panel_compute_brightness(connector, val);
panel->backlight.level = clamp(val, panel->backlight.min,
@@ -1735,6 +1737,8 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = bxt_get_backlight(connector);
val = intel_panel_compute_brightness(connector, val);
panel->backlight.level = clamp(val, panel->backlight.min,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ed662937ec3c..0a09f8ff6aff 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
int high_prio_credits)
{
u32 misccpctl;
+ u32 val;
/* WaTempDisableDOPClkGating:bdw */
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- I915_WRITE(GEN8_L3SQCREG1,
- L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
- L3_HIGH_PRIO_CREDITS(high_prio_credits));
+ val = I915_READ(GEN8_L3SQCREG1);
+ val &= ~L3_PRIO_CREDITS_MASK;
+ val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
+ val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
+ I915_WRITE(GEN8_L3SQCREG1, val);
/*
* Wait at least 100 clocks before re-enabling clock gating.
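
Both L3SQCREG1 hunks above switch from a blind register write to a read-modify-write so that bits outside the credit fields are preserved. The same pattern as a tiny generic helper (illustrative only, not an i915 API):

static inline u32 field_rmw(u32 reg_val, u32 mask, u32 new_bits)
{
	return (reg_val & ~mask) | (new_bits & mask);
}

/* e.g. val = field_rmw(I915_READ(GEN8_L3SQCREG1), L3_PRIO_CREDITS_MASK,
 *		L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
 *		L3_HIGH_PRIO_CREDITS(high_prio_credits));
 */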
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index b66d8e136aa3..49577eba8e7e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
{
enum i915_power_well_id id = power_well->id;
bool wait_fuses = power_well->hsw.has_fuses;
- enum skl_power_gate pg;
+ enum skl_power_gate uninitialized_var(pg);
u32 val;
if (wait_fuses) {
@@ -2782,6 +2782,9 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
/* 6. Enable DBUF */
gen9_dbuf_enable(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
}
#undef CNL_PROCMON_IDX
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index dbb31a014419..deaf869374ea 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -248,7 +248,7 @@ disable_clks:
clk_disable_unprepare(ahb_clk);
disable_gdsc:
regulator_disable(gdsc_reg);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
put_clk:
clk_put(ahb_clk);
put_gdsc:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index c2bdad88447e..824067d2d427 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
.caps = MDP_LM_CAP_WB },
},
.nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
},
.dspp = {
.count = 3,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 6fcb58ab718c..440977677001 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
- pm_runtime_put_autosuspend(&pdev->dev);
-
set_cursor:
ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f15821a0d900..ea5bb0e1632c 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
struct dma_fence *fence;
int i, ret;
- if (!exclusive) {
- /* NOTE: _reserve_shared() must happen before _add_shared_fence(),
- * which makes this a slightly strange place to call it. OTOH this
- * is a convenient can-fail point to hook it in. (And similar to
- * how etnaviv and nouveau handle this.)
- */
- ret = reservation_object_reserve_shared(msm_obj->resv);
- if (ret)
- return ret;
- }
-
fobj = reservation_object_get_list(msm_obj->resv);
if (!fobj || (fobj->shared_count == 0)) {
fence = reservation_object_get_excl(msm_obj->resv);
@@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
}
vaddr = msm_gem_get_vaddr(obj);
- if (!vaddr) {
+ if (IS_ERR(vaddr)) {
msm_gem_put_iova(obj, aspace);
drm_gem_object_unreference(obj);
- return ERR_PTR(-ENOMEM);
+ return ERR_CAST(vaddr);
}
if (bo)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5d0a75d4b249..93535cac0676 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -221,7 +221,7 @@ fail:
return ret;
}
-static int submit_fence_sync(struct msm_gem_submit *submit)
+static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
int i, ret = 0;
@@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
+ if (!write) {
+ /* NOTE: _reserve_shared() must happen before
+ * _add_shared_fence(), which makes this a slightly
+ * strange place to call it. OTOH this is a
+ * convenient can-fail point to hook it in.
+ */
+ ret = reservation_object_reserve_shared(msm_obj->resv);
+ if (ret)
+ return ret;
+ }
+
+ if (no_implicit)
+ continue;
+
ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
if (ret)
break;
@@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
- if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
- ret = submit_fence_sync(submit);
- if (ret)
- goto out;
- }
+ ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
+ if (ret)
+ goto out;
ret = submit_pin_objects(submit);
if (ret)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index ffbff27600e0..6a887032c66a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb);
}
- if (gpu->aspace) {
+
+ if (!IS_ERR_OR_NULL(gpu->aspace)) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
NULL, 0);
msm_gem_address_space_put(gpu->aspace);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 0366b8092f97..ec56794ad039 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+ /* Note that smp_load_acquire() is not strictly required
+ * as CIRC_SPACE_TO_END() does not access the tail more
+ * than once.
+ */
n = min(sz, circ_space_to_end(&rd->fifo));
memcpy(fptr, ptr, n);
- fifo->head = (fifo->head + n) & (BUF_SZ - 1);
+ smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
sz -= n;
ptr += n;
@@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf,
if (ret)
goto out;
+ /* Note that smp_load_acquire() is not strictly required
+ * as CIRC_CNT_TO_END() does not access the head more than
+ * once.
+ */
n = min_t(int, sz, circ_count_to_end(&rd->fifo));
if (copy_to_user(buf, fptr, n)) {
ret = -EFAULT;
goto out;
}
- fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
+ smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
*ppos += n;
wake_up_all(&rd->fifo_event);
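
The notes added above refer to the single-producer/single-consumer FIFO discipline from Documentation/circular-buffers.txt: publish the data before advancing your own index with a release store, and read the opposite index at most once. A sketch of the producer side under those assumptions (illustrative names, not the msm_rd code):

static int fifo_put_one(struct circ_buf *fifo, int size, char c)
{
	unsigned long head = fifo->head;
	unsigned long tail = READ_ONCE(fifo->tail);

	if (CIRC_SPACE(head, tail, size) < 1)
		return -ENOSPC;

	fifo->buf[head] = c;
	/* make the byte visible before moving head */
	smp_store_release(&fifo->head, (head + 1) & (size - 1));
	return 0;
}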
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index f7707849bb53..2b12d82aac15 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -223,7 +223,7 @@ void
nouveau_fbcon_accel_save_disable(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
+ if (drm->fbcon && drm->fbcon->helper.fbdev) {
drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
}
@@ -233,7 +233,7 @@ void
nouveau_fbcon_accel_restore(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
+ if (drm->fbcon && drm->fbcon->helper.fbdev) {
drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
}
}
@@ -245,7 +245,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
struct nouveau_fbdev *fbcon = drm->fbcon;
if (fbcon && drm->channel) {
console_lock();
- fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+ if (fbcon->helper.fbdev)
+ fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
console_unlock();
nouveau_channel_idle(drm->channel);
nvif_object_fini(&fbcon->twod);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2dbf62a2ac41..e4751f92b342 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3265,11 +3265,14 @@ nv50_mstm = {
void
nv50_mstm_service(struct nv50_mstm *mstm)
{
- struct drm_dp_aux *aux = mstm->mgr.aux;
+ struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
bool handled = true;
int ret;
u8 esi[8] = {};
+ if (!aux)
+ return;
+
while (handled) {
ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
if (ret != 8) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 8e2e24a74774..44e116f7880d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -39,5 +39,5 @@ int
g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
{
return nvkm_xtensa_new_(&g84_bsp, device, index,
- true, 0x103000, pengine);
+ device->chipset != 0x92, 0x103000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index d06ad2c372bf..455da298227f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
}
+ mmu->func->flush(vm);
+
nvkm_memory_del(&pgt);
}
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 14c5613b4388..afbf50d0c08f 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -509,23 +509,25 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
.y2 = qfb->base.height
};
- if (!old_state->fb) {
- qxl_io_log(qdev,
- "create primary fb: %dx%d,%d,%d\n",
- bo->surf.width, bo->surf.height,
- bo->surf.stride, bo->surf.format);
+ if (old_state->fb) {
+ qfb_old = to_qxl_framebuffer(old_state->fb);
+ bo_old = gem_to_qxl_bo(qfb_old->obj);
+ } else {
+ bo_old = NULL;
+ }
- qxl_io_create_primary(qdev, 0, bo);
- bo->is_primary = true;
+ if (bo == bo_old)
return;
- } else {
- qfb_old = to_qxl_framebuffer(old_state->fb);
- bo_old = gem_to_qxl_bo(qfb_old->obj);
+ if (bo_old && bo_old->is_primary) {
+ qxl_io_destroy_primary(qdev);
bo_old->is_primary = false;
}
- bo->is_primary = true;
+ if (!bo->is_primary) {
+ qxl_io_create_primary(qdev, 0, bo);
+ bo->is_primary = true;
+ }
qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1);
}
@@ -534,13 +536,15 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane,
{
struct qxl_device *qdev = plane->dev->dev_private;
- if (old_state->fb)
- { struct qxl_framebuffer *qfb =
+ if (old_state->fb) {
+ struct qxl_framebuffer *qfb =
to_qxl_framebuffer(old_state->fb);
struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
- qxl_io_destroy_primary(qdev);
- bo->is_primary = false;
+ if (bo->is_primary) {
+ qxl_io_destroy_primary(qdev);
+ bo->is_primary = false;
+ }
}
}
@@ -698,14 +702,15 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
struct drm_gem_object *obj;
struct qxl_bo *user_bo;
- if (!plane->state->fb) {
- /* we never executed prepare_fb, so there's nothing to
+ if (!old_state->fb) {
+ /*
+ * we never executed prepare_fb, so there's nothing to
* unpin.
*/
return;
}
- obj = to_qxl_framebuffer(plane->state->fb)->obj;
+ obj = to_qxl_framebuffer(old_state->fb)->obj;
user_bo = gem_to_qxl_bo(obj);
qxl_bo_unpin(user_bo);
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 997131d58c7f..ffc10cadcf34 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1663,7 +1663,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
radeon_agp_suspend(rdev);
pci_save_state(dev->pdev);
- if (freeze && rdev->family >= CHIP_CEDAR) {
+ if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
rdev->asic->asic_reset(rdev, true);
pci_restore_state(dev->pdev);
} else if (suspend) {
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 06f05302ee75..882d85db9053 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -26,7 +26,7 @@ config DRM_SUN4I_HDMI_CEC
bool "Allwinner A10 HDMI CEC Support"
depends on DRM_SUN4I_HDMI
select CEC_CORE
- depends on CEC_PIN
+ select CEC_PIN
help
Choose this option if you have an Allwinner SoC with an HDMI
controller and want to use CEC.
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index 1457750988da..a1f8cba251a2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -15,7 +15,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
-#include <media/cec.h>
+#include <media/cec-pin.h>
#define SUN4I_HDMI_CTRL_REG 0x004
#define SUN4I_HDMI_CTRL_ENABLE BIT(31)
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 9ea6cd5a1370..3cf1a6932fac 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -302,26 +302,29 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
hdmi->mod_clk = devm_clk_get(dev, "mod");
if (IS_ERR(hdmi->mod_clk)) {
dev_err(dev, "Couldn't get the HDMI mod clock\n");
- return PTR_ERR(hdmi->mod_clk);
+ ret = PTR_ERR(hdmi->mod_clk);
+ goto err_disable_bus_clk;
}
clk_prepare_enable(hdmi->mod_clk);
hdmi->pll0_clk = devm_clk_get(dev, "pll-0");
if (IS_ERR(hdmi->pll0_clk)) {
dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n");
- return PTR_ERR(hdmi->pll0_clk);
+ ret = PTR_ERR(hdmi->pll0_clk);
+ goto err_disable_mod_clk;
}
hdmi->pll1_clk = devm_clk_get(dev, "pll-1");
if (IS_ERR(hdmi->pll1_clk)) {
dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n");
- return PTR_ERR(hdmi->pll1_clk);
+ ret = PTR_ERR(hdmi->pll1_clk);
+ goto err_disable_mod_clk;
}
ret = sun4i_tmds_create(hdmi);
if (ret) {
dev_err(dev, "Couldn't create the TMDS clock\n");
- return ret;
+ goto err_disable_mod_clk;
}
writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG);
@@ -362,7 +365,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
ret = sun4i_hdmi_i2c_create(dev, hdmi);
if (ret) {
dev_err(dev, "Couldn't create the HDMI I2C adapter\n");
- return ret;
+ goto err_disable_mod_clk;
}
drm_encoder_helper_add(&hdmi->encoder,
@@ -422,6 +425,10 @@ err_cleanup_connector:
drm_encoder_cleanup(&hdmi->encoder);
err_del_i2c_adapter:
i2c_del_adapter(hdmi->i2c);
+err_disable_mod_clk:
+ clk_disable_unprepare(hdmi->mod_clk);
+err_disable_bus_clk:
+ clk_disable_unprepare(hdmi->bus_clk);
return ret;
}
@@ -434,6 +441,8 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
drm_connector_cleanup(&hdmi->connector);
drm_encoder_cleanup(&hdmi->encoder);
i2c_del_adapter(hdmi->i2c);
+ clk_disable_unprepare(hdmi->mod_clk);
+ clk_disable_unprepare(hdmi->bus_clk);
}
static const struct component_ops sun4i_hdmi_ops = {
diff --git a/drivers/gpu/drm/tegra/trace.h b/drivers/gpu/drm/tegra/trace.h
index e9b7cdad5c4c..5a1ab4046e92 100644
--- a/drivers/gpu/drm/tegra/trace.h
+++ b/drivers/gpu/drm/tegra/trace.h
@@ -63,6 +63,6 @@ DEFINE_EVENT(register_access, sor_readl,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/tegra
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 6a573d21d3cc..658fa2d3e40c 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
return -EINVAL;
}
+ /*
+ * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
+ * i.MX53 channel arbitration locking doesn't seem to work properly.
+ * Allow enabling the lock feature on IPUv3H / i.MX6 only.
+ */
+ if (bursts && ipu->ipu_type != IPUV3H)
+ return -EINVAL;
+
for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
if (channel->num == idmac_lock_en_info[i].chnum)
break;
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index c35f74c83065..c860a7997cb5 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -73,6 +73,14 @@
#define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1)
#define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4)
+#define IPU_PRE_STORE_ENG_STATUS 0x120
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK 0xffff
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK 0x3fff
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16
+#define IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL (1 << 30)
+#define IPU_PRE_STORE_ENG_STATUS_STORE_FIELD (1 << 31)
+
#define IPU_PRE_STORE_ENG_SIZE 0x130
#define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0)
#define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16)
@@ -93,6 +101,7 @@ struct ipu_pre {
dma_addr_t buffer_paddr;
void *buffer_virt;
bool in_use;
+ unsigned int safe_window_end;
};
static DEFINE_MUTEX(ipu_pre_list_mutex);
@@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
u32 active_bpp = info->cpp[0] >> 1;
u32 val;
+ /* calculate safe window for ctrl register updates */
+ pre->safe_window_end = height - 2;
+
writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
@@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(5);
+ unsigned short current_yblock;
+ u32 val;
+
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
+
+ do {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(pre->dev, "timeout waiting for PRE safe window\n");
+ return;
+ }
+
+ val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS);
+ current_yblock =
+ (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) &
+ IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK;
+ } while (current_yblock == 0 || current_yblock >= pre->safe_window_end);
+
writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
}
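
The update path above waits, with a 5 ms deadline, until the store engine's current Y block sits inside the safe window before arming the shadow-register update. Assuming the caller may sleep, an equivalent bounded wait could also be written with the iopoll helper (a sketch using the status masks above and <linux/iopoll.h>):

	u32 val;
	int ret;

	ret = readl_poll_timeout(pre->regs + IPU_PRE_STORE_ENG_STATUS, val,
				 ((val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) &
				  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK) != 0 &&
				 ((val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) &
				  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK) <
				 pre->safe_window_end,
				 10, 5000);
	if (ret)
		dev_warn(pre->dev, "timeout waiting for PRE safe window\n");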
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index ecc9ea44dc50..0013ca9f72c8 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -14,6 +14,7 @@
#include <drm/drm_fourcc.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
@@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
val = IPU_PRG_REG_UPDATE_REG_UPDATE;
writel(val, prg->regs + IPU_PRG_REG_UPDATE);
+ /* wait for both double buffers to be filled */
+ readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val,
+ (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) &&
+ (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)),
+ 5, 1000);
+
clk_disable_unprepare(prg->clk_ipg);
chan->enabled = true;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 0a3117cc29e7..374301fcbc86 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -281,6 +281,7 @@ config HID_ELECOM
Support for ELECOM devices:
- BM084 Bluetooth Mouse
- DEFT Trackball (Wired and wireless)
+ - HUGE Trackball (Wired and wireless)
config HID_ELO
tristate "ELO USB 4000/4500 touchscreen"
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9bc91160819b..330ca983828b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2032,6 +2032,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
#endif
#if IS_ENABLED(CONFIG_HID_ELO)
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index e2c7465df69f..54aeea57d209 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -3,6 +3,7 @@
* Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
* Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
* Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
+ * Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
*/
/*
@@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
break;
case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
- /* The DEFT trackball has eight buttons, but its descriptor only
- * reports five, disabling the three Fn buttons on the top of
- * the mouse.
+ case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
+ case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
+ /* The DEFT/HUGE trackball has eight buttons, but its descriptor
+ * only reports five, disabling the three Fn buttons on the top
+ * of the mouse.
*
* Apply the following diff to the descriptor:
*
@@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
* End Collection, End Collection,
*/
if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
- hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
+ hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
rdesc[13] = 8; /* Button/Variable Report Count */
rdesc[21] = 8; /* Button/Variable Usage Maximum */
rdesc[29] = 0; /* Button/Constant Report Count */
@@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
{ }
};
MODULE_DEVICE_TABLE(hid, elecom_devices);
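/*
 * Editor's sketch, not part of the patch: the ELECOM change above follows the
 * usual HID report_fixup pattern. A hypothetical driver patching button
 * counts in place guards the rewrite with a size/content check so an
 * unexpected descriptor is left untouched:
 */
static __u8 *example_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				  unsigned int *rsize)
{
	/* only touch the descriptor layout we recognise */
	if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
		hid_info(hdev, "fixing up Fn button report count\n");
		rdesc[13] = 8;	/* Button/Variable Report Count */
		rdesc[21] = 8;	/* Button/Variable Usage Maximum */
		rdesc[29] = 0;	/* Button/Constant Report Count */
	}
	return rdesc;
}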
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b397a14ab970..be2e005c3c51 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -368,6 +368,8 @@
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
#define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe
#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff
+#define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c
+#define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS 0x010d
#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004
@@ -533,6 +535,7 @@
#define USB_VENDOR_ID_IDEACOM 0x1cb6
#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650
#define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651
+#define USB_DEVICE_ID_IDEACOM_IDC6680 0x6680
#define USB_VENDOR_ID_ILITEK 0x222a
#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001
@@ -660,6 +663,7 @@
#define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
+#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
#define USB_VENDOR_ID_LG 0x1fd2
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 440b999304a5..9e8c4d2ba11d 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -930,6 +930,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
field->application != HID_DG_PEN &&
field->application != HID_DG_TOUCHPAD &&
field->application != HID_GD_KEYBOARD &&
+ field->application != HID_GD_SYSTEM_CONTROL &&
field->application != HID_CP_CONSUMER_CONTROL &&
field->application != HID_GD_WIRELESS_RADIO_CTLS &&
!(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS &&
@@ -1419,6 +1420,12 @@ static const struct hid_device_id mt_devices[] = {
USB_VENDOR_ID_ALPS_JP,
HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) },
+ /* Lenovo X1 TAB Gen 2 */
+ { .driver_data = MT_CLS_WIN_8_DUAL,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_LENOVO,
+ USB_DEVICE_ID_LENOVO_X1_TAB) },
+
/* Anton devices */
{ .driver_data = MT_CLS_EXPORT_ALL_INPUTS,
MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 5b40c2614599..ef241d66562e 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -436,17 +436,24 @@ static int rmi_post_resume(struct hid_device *hdev)
if (!(data->device_flags & RMI_DEVICE))
return 0;
- ret = rmi_reset_attn_mode(hdev);
+ /* Make sure the HID device is ready to receive events */
+ ret = hid_hw_open(hdev);
if (ret)
return ret;
+ ret = rmi_reset_attn_mode(hdev);
+ if (ret)
+ goto out;
+
ret = rmi_driver_resume(rmi_dev, false);
if (ret) {
hid_warn(hdev, "Failed to resume device: %d\n", ret);
- return ret;
+ goto out;
}
- return 0;
+out:
+ hid_hw_close(hdev);
+ return ret;
}
#endif /* CONFIG_PM */
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index ec530454e6f6..5fbe0f81ab2e 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -337,8 +337,8 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
kfree(hidraw);
} else {
/* close device for last reader */
- hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
hid_hw_close(hidraw->hid);
+ hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
}
}
}
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 77396145d2d0..9145c2129a96 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -543,7 +543,8 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
{
/* the worst case is computed from the set_report command with a
* reportID > 15 and the maximum report length */
- int args_len = sizeof(__u8) + /* optional ReportID byte */
+ int args_len = sizeof(__u8) + /* ReportID */
+ sizeof(__u8) + /* optional ReportID byte */
sizeof(__u16) + /* data register */
sizeof(__u16) + /* size of the report */
report_size; /* report */
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 089bad8a9a21..045b5da9b992 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid)
unsigned int rsize = 0;
char *rdesc;
int ret, n;
+ int num_descriptors;
+ size_t offset = offsetof(struct hid_descriptor, desc);
quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
@@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid)
return -ENODEV;
}
+ if (hdesc->bLength < sizeof(struct hid_descriptor)) {
+ dbg_hid("hid descriptor is too short\n");
+ return -EINVAL;
+ }
+
hid->version = le16_to_cpu(hdesc->bcdHID);
hid->country = hdesc->bCountryCode;
- for (n = 0; n < hdesc->bNumDescriptors; n++)
+ num_descriptors = min_t(int, hdesc->bNumDescriptors,
+ (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
+
+ for (n = 0; n < num_descriptors; n++)
if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
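/*
 * Editor's worked example, not part of the patch: struct hid_descriptor is
 * 9 bytes, a 6-byte fixed header (offsetof(struct hid_descriptor, desc) == 6)
 * plus one 3-byte struct hid_class_descriptor. A descriptor advertising
 * bLength = 9 can therefore only be trusted for (9 - 6) / 3 = 1 class
 * descriptor, regardless of what bNumDescriptors claims; anything with
 * bLength < 9 is rejected outright by the new check above.
 */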
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index a83fa76655b9..f489a5cfcb48 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -99,6 +99,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index e82a696a1d07..906e654fb0ba 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -668,8 +668,10 @@ static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev)
/* Try to find an already-probed interface from the same device */
list_for_each_entry(data, &wacom_udev_list, list) {
- if (compare_device_paths(hdev, data->dev, '/'))
+ if (compare_device_paths(hdev, data->dev, '/')) {
+ kref_get(&data->kref);
return data;
+ }
}
/* Fallback to finding devices that appear to be "siblings" */
@@ -766,6 +768,9 @@ static int wacom_led_control(struct wacom *wacom)
if (!wacom->led.groups)
return -ENOTSUPP;
+ if (wacom->wacom_wac.features.type == REMOTE)
+ return -ENOTSUPP;
+
if (wacom->wacom_wac.pid) { /* wireless connected */
report_id = WAC_CMD_WL_LED_CONTROL;
buf_size = 13;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index bb17d7bbefd3..aa692e28b2cd 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -567,8 +567,8 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
keys = data[9] & 0x07;
}
} else {
- buttons = ((data[6] & 0x10) << 10) |
- ((data[5] & 0x10) << 9) |
+ buttons = ((data[6] & 0x10) << 5) |
+ ((data[5] & 0x10) << 4) |
((data[6] & 0x0F) << 4) |
(data[5] & 0x0F);
}
@@ -1227,11 +1227,17 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
continue;
if (range) {
+ /* Fix rotation alignment: userspace expects zero at left */
+ int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]);
+ rotation += 1800/4;
+ if (rotation > 899)
+ rotation -= 1800;
+
input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
- input_report_abs(pen_input, ABS_TILT_X, frame[7]);
- input_report_abs(pen_input, ABS_TILT_Y, frame[8]);
- input_report_abs(pen_input, ABS_Z, get_unaligned_le16(&frame[9]));
+ input_report_abs(pen_input, ABS_TILT_X, (char)frame[7]);
+ input_report_abs(pen_input, ABS_TILT_Y, (char)frame[8]);
+ input_report_abs(pen_input, ABS_Z, rotation);
input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11]));
}
input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
@@ -1319,12 +1325,19 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
unsigned char *data = wacom->data;
int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
- int ring = data[285];
- int prox = buttons | (ring & 0x80);
+ int ring = data[285] & 0x7F;
+ bool ringstatus = data[285] & 0x80;
+ bool prox = buttons || ringstatus;
+
+ /* Fix touchring data: userspace expects 0 at left and increasing clockwise */
+ ring = 71 - ring;
+ ring += 3*72/16;
+ if (ring > 71)
+ ring -= 72;
wacom_report_numbered_buttons(pad_input, 9, buttons);
- input_report_abs(pad_input, ABS_WHEEL, (ring & 0x80) ? (ring & 0x7f) : 0);
+ input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);
input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0);
input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0);
@@ -1616,6 +1629,20 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
return 0;
}
+static int wacom_offset_rotation(struct input_dev *input, struct hid_usage *usage,
+ int value, int num, int denom)
+{
+ struct input_absinfo *abs = &input->absinfo[usage->code];
+ int range = (abs->maximum - abs->minimum + 1);
+
+ value += num*range/denom;
+ if (value > abs->maximum)
+ value -= range;
+ else if (value < abs->minimum)
+ value += range;
+ return value;
+}
+
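/*
 * Editor's worked example, not part of the patch: for a touchring reported
 * over ABS_WHEEL with absinfo minimum 0 and maximum 71 (range 72), the
 * default quarter-turn offset used below, wacom_offset_rotation(..., 1, 4),
 * adds 1 * 72 / 4 = 18 positions and wraps: a raw value of 60 becomes
 * 60 + 18 = 78, which exceeds the maximum and wraps back to 78 - 72 = 6.
 */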
int wacom_equivalent_usage(int usage)
{
if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
@@ -1898,6 +1925,7 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
int i;
bool is_touch_on = value;
+ bool do_report = false;
/*
* Avoid reporting this event and setting inrange_state if this usage
@@ -1912,6 +1940,29 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
}
switch (equivalent_usage) {
+ case WACOM_HID_WD_TOUCHRING:
+ /*
+ * Userspace expects touchrings to increase in value with
+ * clockwise gestures and have their zero point at the
+ * tablet's left. HID events "should" be clockwise-
+ * increasing and zero at top, though the MobileStudio
+ * Pro and 2nd-gen Intuos Pro don't do this...
+ */
+ if (hdev->vendor == 0x56a &&
+ (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */
+ hdev->product == 0x357 || hdev->product == 0x358)) { /* Intuos Pro 2 */
+ value = (field->logical_maximum - value);
+
+ if (hdev->product == 0x357 || hdev->product == 0x358)
+ value = wacom_offset_rotation(input, usage, value, 3, 16);
+ else if (hdev->product == 0x34d || hdev->product == 0x34e)
+ value = wacom_offset_rotation(input, usage, value, 1, 2);
+ }
+ else {
+ value = wacom_offset_rotation(input, usage, value, 1, 4);
+ }
+ do_report = true;
+ break;
case WACOM_HID_WD_TOUCHRINGSTATUS:
if (!value)
input_event(input, usage->type, usage->code, 0);
@@ -1945,10 +1996,14 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
value, i);
/* fall through*/
default:
+ do_report = true;
+ break;
+ }
+
+ if (do_report) {
input_event(input, usage->type, usage->code, value);
if (value)
wacom_wac->hid_data.pad_input_event_flag = true;
- break;
}
}
@@ -2086,22 +2141,34 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
wacom_wac->hid_data.tipswitch |= value;
return;
case HID_DG_TOOLSERIALNUMBER:
- wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
- wacom_wac->serial[0] |= (__u32)value;
+ if (value) {
+ wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
+ wacom_wac->serial[0] |= (__u32)value;
+ }
return;
+ case HID_DG_TWIST:
+ /*
+ * Userspace expects pen twist to have its zero point when
+ * the buttons/finger is on the tablet's left. HID values
+ * are zero when buttons are toward the top.
+ */
+ value = wacom_offset_rotation(input, usage, value, 1, 4);
+ break;
case WACOM_HID_WD_SENSE:
wacom_wac->hid_data.sense_state = value;
return;
case WACOM_HID_WD_SERIALHI:
- wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
- wacom_wac->serial[0] |= ((__u64)value) << 32;
- /*
- * Non-USI EMR devices may contain additional tool type
- * information here. See WACOM_HID_WD_TOOLTYPE case for
- * more details.
- */
- if (value >> 20 == 1) {
- wacom_wac->id[0] |= value & 0xFFFFF;
+ if (value) {
+ wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
+ wacom_wac->serial[0] |= ((__u64)value) << 32;
+ /*
+ * Non-USI EMR devices may contain additional tool type
+ * information here. See WACOM_HID_WD_TOOLTYPE case for
+ * more details.
+ */
+ if (value >> 20 == 1) {
+ wacom_wac->id[0] |= value & 0xFFFFF;
+ }
}
return;
case WACOM_HID_WD_TOOLTYPE:
@@ -2205,7 +2272,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
input_report_key(input, wacom_wac->tool[0], prox);
if (wacom_wac->serial[0]) {
input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
- input_report_abs(input, ABS_MISC, id);
+ input_report_abs(input, ABS_MISC, prox ? id : 0);
}
wacom_wac->hid_data.tipswitch = false;
@@ -2216,6 +2283,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
if (!prox) {
wacom_wac->tool[0] = 0;
wacom_wac->id[0] = 0;
+ wacom_wac->serial[0] = 0;
}
}
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index efd5db743319..894b67ac2cae 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -640,6 +640,7 @@ void vmbus_close(struct vmbus_channel *channel)
*/
return;
}
+ mutex_lock(&vmbus_connection.channel_mutex);
/*
* Close all the sub-channels first and then close the
* primary channel.
@@ -648,16 +649,15 @@ void vmbus_close(struct vmbus_channel *channel)
cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
vmbus_close_internal(cur_channel);
if (cur_channel->rescind) {
- mutex_lock(&vmbus_connection.channel_mutex);
- hv_process_channel_removal(cur_channel,
+ hv_process_channel_removal(
cur_channel->offermsg.child_relid);
- mutex_unlock(&vmbus_connection.channel_mutex);
}
}
/*
* Now close the primary.
*/
vmbus_close_internal(channel);
+ mutex_unlock(&vmbus_connection.channel_mutex);
}
EXPORT_SYMBOL_GPL(vmbus_close);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 060df71c2e8b..379b0df123be 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -159,7 +159,7 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-
+ channel->rescind = true;
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
msglistentry) {
@@ -381,14 +381,21 @@ static void vmbus_release_relid(u32 relid)
true);
}
-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
+void hv_process_channel_removal(u32 relid)
{
unsigned long flags;
- struct vmbus_channel *primary_channel;
+ struct vmbus_channel *primary_channel, *channel;
- BUG_ON(!channel->rescind);
BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
+ /*
+ * Make sure channel is valid as we may have raced.
+ */
+ channel = relid2channel(relid);
+ if (!channel)
+ return;
+
+ BUG_ON(!channel->rescind);
if (channel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(channel->target_cpu,
@@ -515,6 +522,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
if (!fnew) {
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
+ newchannel->probe_done = true;
return;
}
@@ -834,7 +842,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_rescind_offer *rescind;
struct vmbus_channel *channel;
- unsigned long flags;
struct device *dev;
rescind = (struct vmbus_channel_rescind_offer *)hdr;
@@ -873,16 +880,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
return;
}
- spin_lock_irqsave(&channel->lock, flags);
- channel->rescind = true;
- spin_unlock_irqrestore(&channel->lock, flags);
-
- /*
- * Now that we have posted the rescind state, perform
- * rescind related cleanup.
- */
- vmbus_rescind_cleanup(channel);
-
/*
* Now wait for offer handling to complete.
*/
@@ -901,6 +898,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
if (channel->device_obj) {
if (channel->chn_rescind_callback) {
channel->chn_rescind_callback(channel);
+ vmbus_rescind_cleanup(channel);
return;
}
/*
@@ -909,6 +907,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
*/
dev = get_device(&channel->device_obj->device);
if (dev) {
+ vmbus_rescind_cleanup(channel);
vmbus_device_unregister(channel->device_obj);
put_device(dev);
}
@@ -921,29 +920,28 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
* 1. Close all sub-channels first
* 2. Then close the primary channel.
*/
+ mutex_lock(&vmbus_connection.channel_mutex);
+ vmbus_rescind_cleanup(channel);
if (channel->state == CHANNEL_OPEN_STATE) {
/*
* The channel is currently not open;
* it is safe for us to cleanup the channel.
*/
- mutex_lock(&vmbus_connection.channel_mutex);
- hv_process_channel_removal(channel,
- channel->offermsg.child_relid);
- mutex_unlock(&vmbus_connection.channel_mutex);
+ hv_process_channel_removal(rescind->child_relid);
}
+ mutex_unlock(&vmbus_connection.channel_mutex);
}
}
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
- mutex_lock(&vmbus_connection.channel_mutex);
-
BUG_ON(!is_hvsock_channel(channel));
- channel->rescind = true;
- vmbus_device_unregister(channel->device_obj);
+ /* We always get a rescind msg when a connection is closed. */
+ while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
+ msleep(1);
- mutex_unlock(&vmbus_connection.channel_mutex);
+ vmbus_device_unregister(channel->device_obj);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index daa75bd41f86..2364281d8593 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -170,6 +170,10 @@ static void fcopy_send_data(struct work_struct *dummy)
out_src = smsg_out;
break;
+ case WRITE_TO_FILE:
+ out_src = fcopy_transaction.fcopy_msg;
+ out_len = sizeof(struct hv_do_fcopy);
+ break;
default:
out_src = fcopy_transaction.fcopy_msg;
out_len = fcopy_transaction.recv_len;
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a9d49f6f6501..937801ac2fe0 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -768,8 +768,7 @@ static void vmbus_device_release(struct device *device)
struct vmbus_channel *channel = hv_dev->channel;
mutex_lock(&vmbus_connection.channel_mutex);
- hv_process_channel_removal(channel,
- channel->offermsg.child_relid);
+ hv_process_channel_removal(channel->offermsg.child_relid);
mutex_unlock(&vmbus_connection.channel_mutex);
kfree(hv_dev);
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 9c0dbb8191ad..e1be61095532 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -630,7 +630,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE,
GFP_KERNEL);
if (rc)
- goto out_mbox_free;
+ return -ENOMEM;
INIT_WORK(&ctx->workq, xgene_hwmon_evt_work);
@@ -646,7 +646,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
if (IS_ERR(ctx->mbox_chan)) {
dev_err(&pdev->dev,
"SLIMpro mailbox channel request failed\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto out_mbox_free;
}
} else {
struct acpi_pcct_hw_reduced *cppc_ss;
@@ -654,7 +655,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx)) {
dev_err(&pdev->dev, "no pcc-channel property\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto out_mbox_free;
}
cl->rx_callback = xgene_hwmon_pcc_rx_cb;
@@ -662,7 +664,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
if (IS_ERR(ctx->mbox_chan)) {
dev_err(&pdev->dev,
"PPC channel request failed\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto out_mbox_free;
}
/*
@@ -675,13 +678,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
if (!cppc_ss) {
dev_err(&pdev->dev, "PPC subspace not found\n");
rc = -ENODEV;
- goto out_mbox_free;
+ goto out;
}
if (!ctx->mbox_chan->mbox->txdone_irq) {
dev_err(&pdev->dev, "PCC IRQ not supported\n");
rc = -ENODEV;
- goto out_mbox_free;
+ goto out;
}
/*
@@ -696,14 +699,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
} else {
dev_err(&pdev->dev, "Failed to get PCC comm region\n");
rc = -ENODEV;
- goto out_mbox_free;
+ goto out;
}
if (!ctx->pcc_comm_addr) {
dev_err(&pdev->dev,
"Failed to ioremap PCC comm region\n");
rc = -ENOMEM;
- goto out_mbox_free;
+ goto out;
}
/*
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index bc9cebc30526..c2a2ce8ee541 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -144,6 +144,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)0,
},
{
+ /* Lewisburg PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa1a6),
+ .driver_data = (kernel_ulong_t)0,
+ },
+ {
/* Gemini Lake */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
.driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -158,6 +163,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Cedar Fork PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 9414900575d8..f129869e05a9 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -1119,7 +1119,7 @@ void stm_source_unregister_device(struct stm_source_data *data)
stm_source_link_drop(src);
- device_destroy(&stm_source_class, src->dev.devt);
+ device_unregister(&src->dev);
}
EXPORT_SYMBOL_GPL(stm_source_unregister_device);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c06dce2c1da7..45a3f3ca29b3 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -131,6 +131,7 @@ config I2C_I801
Gemini Lake (SOC)
Cannon Lake-H (PCH)
Cannon Lake-LP (PCH)
+ Cedar Fork (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index e114e4e00d29..9e12a53ef7b8 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -68,6 +68,7 @@
* Gemini Lake (SOC) 0x31d4 32 hard yes yes yes
* Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes
* Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes
+ * Cedar Fork (PCH) 0x18df 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -204,6 +205,7 @@
/* Older devices have their ID defined in <linux/pci_ids.h> */
#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
+#define PCI_DEVICE_ID_INTEL_CDF_SMBUS 0x18df
#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
@@ -1025,6 +1027,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
@@ -1513,6 +1516,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS:
case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
+ case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
priv->features |= FEATURE_I2C_BLOCK_READ;
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 84fb35f6837f..eb1d91b986fd 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -1459,6 +1459,6 @@ static struct platform_driver img_scb_i2c_driver = {
};
module_platform_driver(img_scb_i2c_driver);
-MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>");
+MODULE_AUTHOR("James Hogan <jhogan@kernel.org>");
MODULE_DESCRIPTION("IMG host I2C driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 54a47b40546f..f96830ffd9f1 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1021,7 +1021,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
}
dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n",
- rinfo->sda_gpio, rinfo->scl_gpio);
+ rinfo->scl_gpio, rinfo->sda_gpio);
rinfo->prepare_recovery = i2c_imx_prepare_recovery;
rinfo->unprepare_recovery = i2c_imx_unprepare_recovery;
@@ -1100,7 +1100,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
}
/* Request IRQ */
- ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0,
+ ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED,
pdev->name, i2c_imx);
if (ret) {
dev_err(&pdev->dev, "can't claim irq %d\n", irq);
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 22ffcb73c185..b51adffa4841 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -340,12 +340,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
data->word = dma_buffer[0] | (dma_buffer[1] << 8);
break;
case I2C_SMBUS_BLOCK_DATA:
- case I2C_SMBUS_I2C_BLOCK_DATA:
if (desc->rxbytes != dma_buffer[0] + 1)
return -EMSGSIZE;
memcpy(data->block, dma_buffer, desc->rxbytes);
break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ memcpy(&data->block[1], dma_buffer, desc->rxbytes);
+ data->block[0] = desc->rxbytes;
+ break;
}
return 0;
}
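/*
 * Editor's note, not part of the patch: for I2C_SMBUS_BLOCK_DATA the first
 * byte on the wire is the block length, so the buffer can be copied into
 * data->block verbatim once rxbytes matches dma_buffer[0] + 1. For
 * I2C_SMBUS_I2C_BLOCK_DATA there is no on-wire length prefix; the payload
 * belongs at data->block[1..] and the driver fills in
 * data->block[0] = desc->rxbytes itself, which is what the added case does.
 */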
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 1ebb5e947e0b..23c2ea2baedc 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -360,6 +360,7 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
unsigned long fclk_rate = 12000000;
unsigned long internal_clk = 0;
struct clk *fclk;
+ int error;
if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) {
/*
@@ -378,6 +379,13 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
* do this bit unconditionally.
*/
fclk = clk_get(omap->dev, "fck");
+ if (IS_ERR(fclk)) {
+ error = PTR_ERR(fclk);
+ dev_err(omap->dev, "could not get fck: %i\n", error);
+
+ return error;
+ }
+
fclk_rate = clk_get_rate(fclk);
clk_put(fclk);
@@ -410,6 +418,12 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
else
internal_clk = 4000;
fclk = clk_get(omap->dev, "fck");
+ if (IS_ERR(fclk)) {
+ error = PTR_ERR(fclk);
+ dev_err(omap->dev, "could not get fck: %i\n", error);
+
+ return error;
+ }
fclk_rate = clk_get_rate(fclk) / 1000;
clk_put(fclk);
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 0ecdb47a23ab..174579d32e5f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -85,6 +85,9 @@
/* SB800 constants */
#define SB800_PIIX4_SMB_IDX 0xcd6
+#define KERNCZ_IMC_IDX 0x3e
+#define KERNCZ_IMC_DATA 0x3f
+
/*
* SB800 port is selected by bits 2:1 of the smb_en register (0x2c)
* or the smb_sel register (0x2e), depending on bit 0 of register 0x2f.
@@ -94,6 +97,12 @@
#define SB800_PIIX4_PORT_IDX_ALT 0x2e
#define SB800_PIIX4_PORT_IDX_SEL 0x2f
#define SB800_PIIX4_PORT_IDX_MASK 0x06
+#define SB800_PIIX4_PORT_IDX_SHIFT 1
+
+/* On kerncz, SmBus0Sel is at bits 20:19 of PMx00 DecodeEn */
+#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
+#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
+#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
/* insmod parameters */
@@ -149,6 +158,8 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
*/
static DEFINE_MUTEX(piix4_mutex_sb800);
static u8 piix4_port_sel_sb800;
+static u8 piix4_port_mask_sb800;
+static u8 piix4_port_shift_sb800;
static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
" port 0", " port 2", " port 3", " port 4"
};
@@ -159,6 +170,7 @@ struct i2c_piix4_adapdata {
/* SB800 */
bool sb800_main;
+ bool notify_imc;
u8 port; /* Port number, shifted */
};
@@ -347,7 +359,19 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
/* Find which register is used for port selection */
if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
- piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+ switch (PIIX4_dev->device) {
+ case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+ piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
+ piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
+ piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
+ break;
+ case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
+ default:
+ piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+ piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+ piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
+ break;
+ }
} else {
mutex_lock(&piix4_mutex_sb800);
outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
@@ -355,6 +379,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
piix4_port_sel_sb800 = (port_sel & 0x01) ?
SB800_PIIX4_PORT_IDX_ALT :
SB800_PIIX4_PORT_IDX;
+ piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+ piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
mutex_unlock(&piix4_mutex_sb800);
}
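/*
 * Editor's worked example, not part of the patch: with the KERNCZ layout the
 * port index lands in bits 4:3 of the byte read back from
 * SB800_PIIX4_SMB_IDX + 1, so piix4_add_adapter() below now stores
 * port << SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ; e.g. port 2 becomes
 * 2 << 3 = 0x10, which fits SB800_PIIX4_PORT_IDX_MASK_KERNCZ (0x18). On the
 * older layout the same port is 2 << 1 = 0x04 under mask 0x06.
 */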
@@ -572,6 +598,67 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
return 0;
}
+static uint8_t piix4_imc_read(uint8_t idx)
+{
+ outb_p(idx, KERNCZ_IMC_IDX);
+ return inb_p(KERNCZ_IMC_DATA);
+}
+
+static void piix4_imc_write(uint8_t idx, uint8_t value)
+{
+ outb_p(idx, KERNCZ_IMC_IDX);
+ outb_p(value, KERNCZ_IMC_DATA);
+}
+
+static int piix4_imc_sleep(void)
+{
+ int timeout = MAX_TIMEOUT;
+
+ if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
+ return -EBUSY;
+
+ /* clear response register */
+ piix4_imc_write(0x82, 0x00);
+ /* request ownership flag */
+ piix4_imc_write(0x83, 0xB4);
+ /* kick off IMC Mailbox command 96 */
+ piix4_imc_write(0x80, 0x96);
+
+ while (timeout--) {
+ if (piix4_imc_read(0x82) == 0xfa) {
+ release_region(KERNCZ_IMC_IDX, 2);
+ return 0;
+ }
+ usleep_range(1000, 2000);
+ }
+
+ release_region(KERNCZ_IMC_IDX, 2);
+ return -ETIMEDOUT;
+}
+
+static void piix4_imc_wakeup(void)
+{
+ int timeout = MAX_TIMEOUT;
+
+ if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
+ return;
+
+ /* clear response register */
+ piix4_imc_write(0x82, 0x00);
+ /* release ownership flag */
+ piix4_imc_write(0x83, 0xB5);
+ /* kick off IMC Mailbox command 96 */
+ piix4_imc_write(0x80, 0x96);
+
+ while (timeout--) {
+ if (piix4_imc_read(0x82) == 0xfa)
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ release_region(KERNCZ_IMC_IDX, 2);
+}
+
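/*
 * Editor's sketch, not part of the patch: piix4_imc_sleep() and
 * piix4_imc_wakeup() above share the same mailbox handshake. A hypothetical
 * common helper would differ only in the ownership flag written to register
 * 0x83 (0xB4 to request, 0xB5 to release); 0xfa in register 0x82 is the
 * IMC's acknowledgement.
 */
static int piix4_imc_command(u8 ownership_flag)
{
	int timeout = MAX_TIMEOUT;

	if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
		return -EBUSY;

	piix4_imc_write(0x82, 0x00);		/* clear response register */
	piix4_imc_write(0x83, ownership_flag);	/* request/release ownership */
	piix4_imc_write(0x80, 0x96);		/* kick off IMC Mailbox command 96 */

	while (timeout--) {
		if (piix4_imc_read(0x82) == 0xfa) {
			release_region(KERNCZ_IMC_IDX, 2);
			return 0;
		}
		usleep_range(1000, 2000);
	}

	release_region(KERNCZ_IMC_IDX, 2);
	return -ETIMEDOUT;
}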
/*
* Handles access to multiple SMBus ports on the SB800.
* The port is selected by bits 2:1 of the smb_en register (0x2c).
@@ -612,12 +699,47 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
return -EBUSY;
}
+ /*
+ * Notify the IMC (Integrated Micro Controller) if required.
+ * Among other responsibilities, the IMC is in charge of monitoring
+ * the system fans and temperature sensors, and acting accordingly.
+ * All this is done through SMBus and can/will collide
+ * with our transactions if they are long (BLOCK_DATA).
+ * Therefore we need to request the ownership flag during those
+ * transactions.
+ */
+ if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) {
+ int ret;
+
+ ret = piix4_imc_sleep();
+ switch (ret) {
+ case -EBUSY:
+ dev_warn(&adap->dev,
+ "IMC base address index region 0x%x already in use.\n",
+ KERNCZ_IMC_IDX);
+ break;
+ case -ETIMEDOUT:
+ dev_warn(&adap->dev,
+ "Failed to communicate with the IMC.\n");
+ break;
+ default:
+ break;
+ }
+
+ /* If IMC communication fails do not retry */
+ if (ret) {
+ dev_warn(&adap->dev,
+ "Continuing without IMC notification.\n");
+ adapdata->notify_imc = false;
+ }
+ }
+
outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
port = adapdata->port;
- if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port)
- outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port,
+ if ((smba_en_lo & piix4_port_mask_sb800) != port)
+ outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
SB800_PIIX4_SMB_IDX + 1);
retval = piix4_access(adap, addr, flags, read_write,
@@ -628,6 +750,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
/* Release the semaphore */
outb_p(smbslvcnt | 0x20, SMBSLVCNT);
+ if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc)
+ piix4_imc_wakeup();
+
mutex_unlock(&piix4_mutex_sb800);
return retval;
@@ -679,7 +804,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
static struct i2c_adapter *piix4_aux_adapter;
static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
- bool sb800_main, u8 port,
+ bool sb800_main, u8 port, bool notify_imc,
const char *name, struct i2c_adapter **padap)
{
struct i2c_adapter *adap;
@@ -706,7 +831,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
adapdata->smba = smba;
adapdata->sb800_main = sb800_main;
- adapdata->port = port << 1;
+ adapdata->port = port << piix4_port_shift_sb800;
+ adapdata->notify_imc = notify_imc;
/* set up the sysfs linkage to our parent device */
adap->dev.parent = &dev->dev;
@@ -728,14 +854,15 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
return 0;
}
-static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba)
+static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba,
+ bool notify_imc)
{
struct i2c_piix4_adapdata *adapdata;
int port;
int retval;
for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) {
- retval = piix4_add_adapter(dev, smba, true, port,
+ retval = piix4_add_adapter(dev, smba, true, port, notify_imc,
piix4_main_port_names_sb800[port],
&piix4_main_adapters[port]);
if (retval < 0)
@@ -769,6 +896,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
dev->revision >= 0x40) ||
dev->vendor == PCI_VENDOR_ID_AMD) {
+ bool notify_imc = false;
is_sb800 = true;
if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) {
@@ -778,6 +906,20 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -EBUSY;
}
+ if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) {
+ u8 imc;
+
+ /*
+ * Detect whether the IMC is active; this method is
+ * described in coreboot's AMD IMC notes
+ */
+ pci_bus_read_config_byte(dev->bus, PCI_DEVFN(0x14, 3),
+ 0x40, &imc);
+ if (imc & 0x80)
+ notify_imc = true;
+ }
+
/* base address location etc changed in SB800 */
retval = piix4_setup_sb800(dev, id, 0);
if (retval < 0) {
@@ -789,7 +931,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
* Try to register multiplexed main SMBus adapter,
* give up if we can't
*/
- retval = piix4_add_adapters_sb800(dev, retval);
+ retval = piix4_add_adapters_sb800(dev, retval, notify_imc);
if (retval < 0) {
release_region(SB800_PIIX4_SMB_IDX, 2);
return retval;
@@ -800,7 +942,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
return retval;
/* Try to register main SMBus adapter, give up if we can't */
- retval = piix4_add_adapter(dev, retval, false, 0, "",
+ retval = piix4_add_adapter(dev, retval, false, 0, false, "",
&piix4_main_adapters[0]);
if (retval < 0)
return retval;
@@ -827,7 +969,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (retval > 0) {
/* Try to add the aux adapter if it exists,
* piix4_add_adapter will clean up if this fails */
- piix4_add_adapter(dev, retval, false, 0,
+ piix4_add_adapter(dev, retval, false, 0, false,
is_sb800 ? piix4_aux_port_name_sb800 : "",
&piix4_aux_adapter);
}
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 22e08ae1704f..25fcc3c1e32b 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -627,6 +627,7 @@ static const struct dev_pm_ops sprd_i2c_pm_ops = {
static const struct of_device_id sprd_i2c_of_match[] = {
{ .compatible = "sprd,sc9860-i2c", },
+ {},
};
static struct platform_driver sprd_i2c_driver = {
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 47c67b0ca896..d4a6e9c2e9aa 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -215,7 +215,7 @@ struct stm32f7_i2c_dev {
unsigned int msg_num;
unsigned int msg_id;
struct stm32f7_i2c_msg f7_msg;
- struct stm32f7_i2c_setup *setup;
+ struct stm32f7_i2c_setup setup;
struct stm32f7_i2c_timings timing;
};
@@ -265,7 +265,7 @@ static struct stm32f7_i2c_spec i2c_specs[] = {
},
};
-struct stm32f7_i2c_setup stm32f7_setup = {
+static const struct stm32f7_i2c_setup stm32f7_setup = {
.rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
.fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
.dnf = STM32F7_I2C_DNF_DEFAULT,
@@ -537,7 +537,7 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR);
/* Enable I2C */
- if (i2c_dev->setup->analog_filter)
+ if (i2c_dev->setup.analog_filter)
stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_ANFOFF);
else
@@ -887,22 +887,19 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
}
setup = of_device_get_match_data(&pdev->dev);
- i2c_dev->setup->rise_time = setup->rise_time;
- i2c_dev->setup->fall_time = setup->fall_time;
- i2c_dev->setup->dnf = setup->dnf;
- i2c_dev->setup->analog_filter = setup->analog_filter;
+ i2c_dev->setup = *setup;
ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns",
&rise_time);
if (!ret)
- i2c_dev->setup->rise_time = rise_time;
+ i2c_dev->setup.rise_time = rise_time;
ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns",
&fall_time);
if (!ret)
- i2c_dev->setup->fall_time = fall_time;
+ i2c_dev->setup.fall_time = fall_time;
- ret = stm32f7_i2c_setup_timing(i2c_dev, i2c_dev->setup);
+ ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
if (ret)
goto clk_free;
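/*
 * Editor's note, not part of the patch: embedding a struct stm32f7_i2c_setup
 * in the device state (instead of keeping a pointer) lets the per-instance
 * copy made by "i2c_dev->setup = *setup" be overridden from DT properties
 * without writing through a pointer into the shared, now const, match data.
 */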
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 01b2adfd8226..eaf39e5db08b 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1451,6 +1451,7 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
if (hwif_init(hwif) == 0) {
printk(KERN_INFO "%s: failed to initialize IDE "
"interface\n", hwif->name);
+ device_unregister(hwif->portdev);
device_unregister(&hwif->gendev);
ide_disable_port(hwif);
continue;
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
index 86aa88aeb3a6..acf874800ca4 100644
--- a/drivers/ide/ide-scan-pci.c
+++ b/drivers/ide/ide-scan-pci.c
@@ -56,6 +56,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
{
struct list_head *l;
struct pci_driver *d;
+ int ret;
list_for_each(l, &ide_pci_drivers) {
d = list_entry(l, struct pci_driver, node);
@@ -63,10 +64,14 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
const struct pci_device_id *id =
pci_match_id(d->id_table, dev);
- if (id != NULL && d->probe(dev, id) >= 0) {
- dev->driver = d;
- pci_dev_get(dev);
- return 1;
+ if (id != NULL) {
+ pci_assign_irq(dev);
+ ret = d->probe(dev, id);
+ if (ret >= 0) {
+ dev->driver = d;
+ pci_dev_get(dev);
+ return 1;
+ }
}
}
}
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 112d2fe1bcdb..fdc8e813170c 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -179,6 +179,7 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
/**
* ide_pci_enable - do PCI enables
* @dev: PCI device
+ * @bars: PCI BARs mask
* @d: IDE port info
*
* Enable the IDE PCI device. We attempt to enable the device in full
@@ -189,9 +190,10 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
* Returns zero on success or an error code
*/
-static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
+static int ide_pci_enable(struct pci_dev *dev, int bars,
+ const struct ide_port_info *d)
{
- int ret, bars;
+ int ret;
if (pci_enable_device(dev)) {
ret = pci_enable_device_io(dev);
@@ -216,18 +218,6 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
goto out;
}
- if (d->host_flags & IDE_HFLAG_SINGLE)
- bars = (1 << 2) - 1;
- else
- bars = (1 << 4) - 1;
-
- if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
- if (d->host_flags & IDE_HFLAG_CS5520)
- bars |= (1 << 2);
- else
- bars |= (1 << 4);
- }
-
ret = pci_request_selected_regions(dev, bars, d->name);
if (ret < 0)
printk(KERN_ERR "%s %s: can't reserve resources\n",
@@ -403,6 +393,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
/**
* ide_setup_pci_controller - set up IDE PCI
* @dev: PCI device
+ * @bars: PCI BARs mask
* @d: IDE port info
* @noisy: verbose flag
*
@@ -411,7 +402,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
* and enables it if need be
*/
-static int ide_setup_pci_controller(struct pci_dev *dev,
+static int ide_setup_pci_controller(struct pci_dev *dev, int bars,
const struct ide_port_info *d, int noisy)
{
int ret;
@@ -420,7 +411,7 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
if (noisy)
ide_setup_pci_noise(dev, d);
- ret = ide_pci_enable(dev, d);
+ ret = ide_pci_enable(dev, bars, d);
if (ret < 0)
goto out;
@@ -428,16 +419,20 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
if (ret < 0) {
printk(KERN_ERR "%s %s: error accessing PCI regs\n",
d->name, pci_name(dev));
- goto out;
+ goto out_free_bars;
}
if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */
ret = ide_pci_configure(dev, d);
if (ret < 0)
- goto out;
+ goto out_free_bars;
printk(KERN_INFO "%s %s: device enabled (Linux)\n",
d->name, pci_name(dev));
}
+ goto out;
+
+out_free_bars:
+ pci_release_selected_regions(dev, bars);
out:
return ret;
}
@@ -540,13 +535,28 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
{
struct pci_dev *pdev[] = { dev1, dev2 };
struct ide_host *host;
- int ret, i, n_ports = dev2 ? 4 : 2;
+ int ret, i, n_ports = dev2 ? 4 : 2, bars;
struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
+ if (d->host_flags & IDE_HFLAG_SINGLE)
+ bars = (1 << 2) - 1;
+ else
+ bars = (1 << 4) - 1;
+
+ if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+ if (d->host_flags & IDE_HFLAG_CS5520)
+ bars |= (1 << 2);
+ else
+ bars |= (1 << 4);
+ }
+
for (i = 0; i < n_ports / 2; i++) {
- ret = ide_setup_pci_controller(pdev[i], d, !i);
- if (ret < 0)
+ ret = ide_setup_pci_controller(pdev[i], bars, d, !i);
+ if (ret < 0) {
+ if (i == 1)
+ pci_release_selected_regions(pdev[0], bars);
goto out;
+ }
ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
}
@@ -554,7 +564,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
host = ide_host_alloc(d, hws, n_ports);
if (host == NULL) {
ret = -ENOMEM;
- goto out;
+ goto out_free_bars;
}
host->dev[0] = &dev1->dev;
@@ -576,7 +586,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
* do_ide_setup_pci_device() on the first device!
*/
if (ret < 0)
- goto out;
+ goto out_free_bars;
/* fixup IRQ */
if (ide_pci_is_in_compatibility_mode(pdev[i])) {
@@ -589,6 +599,13 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
ret = ide_host_register(host, d, hws);
if (ret)
ide_host_free(host);
+ else
+ goto out;
+
+out_free_bars:
+ i = n_ports / 2;
+ while (i--)
+ pci_release_selected_regions(pdev[i], bars);
out:
return ret;
}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 57625653fcb6..1d13bf03c758 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -243,6 +243,8 @@ config DA9150_GPADC
config DLN2_ADC
tristate "Diolan DLN-2 ADC driver support"
depends on MFD_DLN2
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Diolan DLN-2 ADC.
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index e6706a09e100..47c3d7f32900 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -257,7 +257,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
unsigned int vref_mv)
{
struct ad7793_state *st = iio_priv(indio_dev);
- int i, ret = -1;
+ int i, ret;
unsigned long long scale_uv;
u32 id;
@@ -266,7 +266,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
return ret;
/* reset the serial interface */
- ret = spi_write(st->sd.spi, (u8 *)&ret, sizeof(ret));
+ ret = ad_sd_reset(&st->sd, 32);
if (ret < 0)
goto out;
usleep_range(500, 2000); /* Wait for at least 500us */
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index d10bd0c97233..22c4c17cd996 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -177,6 +177,34 @@ out:
}
EXPORT_SYMBOL_GPL(ad_sd_read_reg);
+/**
+ * ad_sd_reset() - Reset the serial interface
+ *
+ * @sigma_delta: The sigma delta device
+ * @reset_length: Number of SCLKs with DIN = 1
+ *
+ * Returns 0 on success, an error code otherwise.
+ **/
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
+ unsigned int reset_length)
+{
+ uint8_t *buf;
+ unsigned int size;
+ int ret;
+
+ size = DIV_ROUND_UP(reset_length, 8);
+ buf = kcalloc(size, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memset(buf, 0xff, size);
+ ret = spi_write(sigma_delta->spi, buf, size);
+ kfree(buf);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ad_sd_reset);
+
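/*
 * Editor's note, not part of the patch: callers pass the number of SCLK
 * cycles their part needs with DIN held high, and the helper rounds that up
 * to whole 0xff bytes: DIV_ROUND_UP(32, 8) = 4 bytes for the ad7793 call
 * above, ad_sd_reset(&st->sd, 32).
 */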
static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
unsigned int mode, unsigned int channel)
{
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index bc5b38e3a147..a70ef7fec95f 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -225,6 +225,7 @@ struct at91_adc_trigger {
char *name;
unsigned int trgmod_value;
unsigned int edge_type;
+ bool hw_trig;
};
struct at91_adc_state {
@@ -254,16 +255,25 @@ static const struct at91_adc_trigger at91_adc_trigger_list[] = {
.name = "external_rising",
.trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_RISE,
.edge_type = IRQ_TYPE_EDGE_RISING,
+ .hw_trig = true,
},
{
.name = "external_falling",
.trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL,
.edge_type = IRQ_TYPE_EDGE_FALLING,
+ .hw_trig = true,
},
{
.name = "external_any",
.trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY,
.edge_type = IRQ_TYPE_EDGE_BOTH,
+ .hw_trig = true,
+ },
+ {
+ .name = "software",
+ .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_NO_TRIGGER,
+ .edge_type = IRQ_TYPE_NONE,
+ .hw_trig = false,
},
};
@@ -597,7 +607,7 @@ static int at91_adc_probe(struct platform_device *pdev)
struct at91_adc_state *st;
struct resource *res;
int ret, i;
- u32 edge_type;
+ u32 edge_type = IRQ_TYPE_NONE;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
if (!indio_dev)
@@ -641,14 +651,14 @@ static int at91_adc_probe(struct platform_device *pdev)
ret = of_property_read_u32(pdev->dev.of_node,
"atmel,trigger-edge-type", &edge_type);
if (ret) {
- dev_err(&pdev->dev,
- "invalid or missing value for atmel,trigger-edge-type\n");
- return ret;
+ dev_dbg(&pdev->dev,
+ "atmel,trigger-edge-type not specified, only software trigger available\n");
}
st->selected_trig = NULL;
- for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT; i++)
+ /* find the right trigger, or no trigger at all */
+ for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT + 1; i++)
if (at91_adc_trigger_list[i].edge_type == edge_type) {
st->selected_trig = &at91_adc_trigger_list[i];
break;
@@ -717,24 +727,27 @@ static int at91_adc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
- ret = at91_adc_buffer_init(indio_dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
- goto per_clk_disable_unprepare;
- }
+ if (st->selected_trig->hw_trig) {
+ ret = at91_adc_buffer_init(indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
+ goto per_clk_disable_unprepare;
+ }
- ret = at91_adc_trigger_init(indio_dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "couldn't setup the triggers.\n");
- goto per_clk_disable_unprepare;
+ ret = at91_adc_trigger_init(indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't setup the triggers.\n");
+ goto per_clk_disable_unprepare;
+ }
}
ret = iio_device_register(indio_dev);
if (ret < 0)
goto per_clk_disable_unprepare;
- dev_info(&pdev->dev, "setting up trigger as %s\n",
- st->selected_trig->name);
+ if (st->selected_trig->hw_trig)
+ dev_info(&pdev->dev, "setting up trigger as %s\n",
+ st->selected_trig->name);
dev_info(&pdev->dev, "version: %x\n",
readl_relaxed(st->base + AT91_SAMA5D2_VERSION));
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 634717ae12f3..071dd23a33d9 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -17,6 +17,8 @@
* MCP3204
* MCP3208
* ------------
+ * 13 bit converter
+ * MCP3301
*
* Datasheet can be found here:
* http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001
@@ -96,7 +98,7 @@ static int mcp320x_channel_to_tx_data(int device_index,
}
static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
- bool differential, int device_index)
+ bool differential, int device_index, int *val)
{
int ret;
@@ -117,19 +119,25 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
switch (device_index) {
case mcp3001:
- return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+ *val = (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+ return 0;
case mcp3002:
case mcp3004:
case mcp3008:
- return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+ *val = (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+ return 0;
case mcp3201:
- return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+ *val = (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+ return 0;
case mcp3202:
case mcp3204:
case mcp3208:
- return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+ *val = (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+ return 0;
case mcp3301:
- return sign_extend32((adc->rx_buf[0] & 0x1f) << 8 | adc->rx_buf[1], 12);
+ *val = sign_extend32((adc->rx_buf[0] & 0x1f) << 8
+ | adc->rx_buf[1], 12);
+ return 0;
default:
return -EINVAL;
}
@@ -150,12 +158,10 @@ static int mcp320x_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = mcp320x_adc_conversion(adc, channel->address,
- channel->differential, device_index);
-
+ channel->differential, device_index, val);
if (ret < 0)
goto out;
- *val = ret;
ret = IIO_VAL_INT;
break;
@@ -312,6 +318,7 @@ static int mcp320x_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp320x_info;
+ spi_set_drvdata(spi, indio_dev);
chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
indio_dev->channels = chip_info->channels;
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index e3c15f88075f..4df32cf1650e 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1666,7 +1666,7 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
num_channels = of_property_count_u32_elems(node, "st,adc-channels");
if (num_channels < 0 ||
- num_channels >= adc_info->max_channels) {
+ num_channels > adc_info->max_channels) {
dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
return num_channels < 0 ? num_channels : -EINVAL;
}
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index d1210024f6bc..e0dc20488335 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -52,7 +52,7 @@
#define ADS1015_CFG_COMP_QUE_MASK GENMASK(1, 0)
#define ADS1015_CFG_COMP_LAT_MASK BIT(2)
-#define ADS1015_CFG_COMP_POL_MASK BIT(2)
+#define ADS1015_CFG_COMP_POL_MASK BIT(3)
#define ADS1015_CFG_COMP_MODE_MASK BIT(4)
#define ADS1015_CFG_DR_MASK GENMASK(7, 5)
#define ADS1015_CFG_MOD_MASK BIT(8)
@@ -1017,10 +1017,12 @@ static int ads1015_probe(struct i2c_client *client,
switch (irq_trig) {
case IRQF_TRIGGER_LOW:
- cfg_comp |= ADS1015_CFG_COMP_POL_LOW;
+ cfg_comp |= ADS1015_CFG_COMP_POL_LOW <<
+ ADS1015_CFG_COMP_POL_SHIFT;
break;
case IRQF_TRIGGER_HIGH:
- cfg_comp |= ADS1015_CFG_COMP_POL_HIGH;
+ cfg_comp |= ADS1015_CFG_COMP_POL_HIGH <<
+ ADS1015_CFG_COMP_POL_SHIFT;
break;
default:
return -EINVAL;
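/*
 * Editor's note, not part of the patch: in the ADS1015 config register the
 * comparator polarity bit is bit 3, next to the latching bit in bit 2, so
 * the old BIT(2) mask aliased COMP_LAT. With the corrected mask the
 * requested polarity also has to be shifted into place, which is what the
 * ADS1015_CFG_COMP_POL_SHIFT changes above do; e.g. "active high" becomes
 * 1 << 3 = 0x0008 in the 16-bit config word.
 */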
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 1edd99f0c5e5..e3cfb91bffc6 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -887,21 +887,27 @@ static int twl4030_madc_probe(struct platform_device *pdev)
/* Enable 3v1 bias regulator for MADC[3:6] */
madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1");
- if (IS_ERR(madc->usb3v1))
- return -ENODEV;
+ if (IS_ERR(madc->usb3v1)) {
+ ret = -ENODEV;
+ goto err_i2c;
+ }
ret = regulator_enable(madc->usb3v1);
- if (ret)
+ if (ret) {
dev_err(madc->dev, "could not enable 3v1 bias regulator\n");
+ goto err_i2c;
+ }
ret = iio_device_register(iio_dev);
if (ret) {
dev_err(&pdev->dev, "could not register iio device\n");
- goto err_i2c;
+ goto err_usb3v1;
}
return 0;
+err_usb3v1:
+ regulator_disable(madc->usb3v1);
err_i2c:
twl4030_madc_set_current_generator(madc, 0, 0);
err_current_generator:
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index d99bb1460fe2..02e833b14db0 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -463,8 +463,17 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
u8 drdy_mask;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- if (!sdata->sensor_settings->drdy_irq.addr)
+ if (!sdata->sensor_settings->drdy_irq.addr) {
+ /*
+ * There are some devices (e.g. LIS3MDL) where the drdy line is
+ * routed to a given pin and it is not possible to select a
+ * different one. Take the irq status register into account to
+ * determine whether the irq trigger can be properly supported
+ */
+ if (sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+ sdata->hw_irq_trigger = enable;
return 0;
+ }
/* Enable/Disable the interrupt generator 1. */
if (sdata->sensor_settings->drdy_irq.ig1.en_addr > 0) {
diff --git a/drivers/iio/dummy/iio_simple_dummy_events.c b/drivers/iio/dummy/iio_simple_dummy_events.c
index ed63ffd849f8..7ec2a0bb0807 100644
--- a/drivers/iio/dummy/iio_simple_dummy_events.c
+++ b/drivers/iio/dummy/iio_simple_dummy_events.c
@@ -72,6 +72,7 @@ int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
st->event_en = state;
else
return -EINVAL;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 17ec4cee51dc..a47428b4d31b 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -310,8 +310,10 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
ret = indio_dev->info->debugfs_reg_access(indio_dev,
indio_dev->cached_reg_addr,
0, &val);
- if (ret)
+ if (ret) {
dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
+ return ret;
+ }
len = snprintf(buf, sizeof(buf), "0x%X\n", val);
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index e68368b5b2a3..08aafba4481c 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -315,6 +315,10 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
},
},
},
+ .drdy_irq = {
+ /* drdy line is routed to the drdy pin */
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
.multi_read_bit = true,
.bootime = 2,
},
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 0d2ea3ee371b..8f26428804a2 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -573,7 +573,7 @@ static int bmp280_chip_config(struct bmp280_data *data)
u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) |
BMP280_OSRS_PRESS_X(data->oversampling_press + 1);
- ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS,
+ ret = regmap_write_bits(data->regmap, BMP280_REG_CTRL_MEAS,
BMP280_OSRS_TEMP_MASK |
BMP280_OSRS_PRESS_MASK |
BMP280_MODE_MASK,
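
The bmp280 change above swaps regmap_update_bits() for regmap_write_bits(). As far as the regmap API goes, update_bits performs a read-modify-write and may skip the bus write when the register already holds the requested value, while write_bits forces the write; here the write itself is what kicks off a forced-mode measurement, so it must not be elided. A small userspace model of that difference (bus_write() and the fake register are stand-ins for real hardware access):

#include <stdint.h>
#include <stdio.h>

static uint8_t reg;		/* pretend hardware register */
static int bus_writes;		/* count actual bus accesses */

static void bus_write(uint8_t val) { reg = val; bus_writes++; }

/* Model of regmap_update_bits(): skip the write when nothing changes. */
static void update_bits(uint8_t mask, uint8_t val)
{
	uint8_t next = (uint8_t)((reg & ~mask) | (val & mask));

	if (next != reg)
		bus_write(next);
}

/* Model of regmap_write_bits(): always perform the write. */
static void write_bits(uint8_t mask, uint8_t val)
{
	bus_write((uint8_t)((reg & ~mask) | (val & mask)));
}

int main(void)
{
	reg = 0x25;			/* already holds the "measure" value */
	update_bits(0x03, 0x01);	/* skipped: measurement never triggers */
	write_bits(0x03, 0x01);		/* written: side effect happens */
	printf("bus writes seen: %d\n", bus_writes);
	return 0;
}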
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index ebfb1de7377f..91431454eb85 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -865,7 +865,6 @@ complete:
static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev,
struct zpa2326_private *private)
{
- int ret;
unsigned int val;
long timeout;
@@ -887,14 +886,11 @@ static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev,
/* Timed out. */
zpa2326_warn(indio_dev, "no one shot interrupt occurred (%ld)",
timeout);
- ret = -ETIME;
- } else if (timeout < 0) {
- zpa2326_warn(indio_dev,
- "wait for one shot interrupt cancelled");
- ret = -ERESTARTSYS;
+ return -ETIME;
}
- return ret;
+ zpa2326_warn(indio_dev, "wait for one shot interrupt cancelled");
+ return -ERESTARTSYS;
}
static int zpa2326_init_managed_irq(struct device *parent,
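
The zpa2326 rework above flattens the return handling of a completion wait. Assuming the timeout value comes from wait_for_completion_interruptible_timeout(), as the -ETIME/-ERESTARTSYS mapping suggests, its convention is: positive means completed with that many jiffies to spare, zero means the timer expired, negative means a signal interrupted the wait. A sketch of that mapping (kernel context, <linux/completion.h> and <linux/jiffies.h> assumed; wait_one_shot() and its arguments are illustrative):

static int wait_one_shot(struct completion *done, unsigned long ms)
{
	long timeout = wait_for_completion_interruptible_timeout(done,
						msecs_to_jiffies(ms));

	if (timeout > 0)
		return 0;		/* completed; timeout = jiffies left */
	if (!timeout)
		return -ETIME;		/* timer expired first */
	return -ERESTARTSYS;		/* interrupted by a signal */
}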
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 0eeff29b61be..4a48b7ba3a1c 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -39,8 +39,12 @@
#define AS3935_AFE_GAIN_MAX 0x1F
#define AS3935_AFE_PWR_BIT BIT(0)
+#define AS3935_NFLWDTH 0x01
+#define AS3935_NFLWDTH_MASK 0x7f
+
#define AS3935_INT 0x03
#define AS3935_INT_MASK 0x0f
+#define AS3935_DISTURB_INT BIT(2)
#define AS3935_EVENT_INT BIT(3)
#define AS3935_NOISE_INT BIT(0)
@@ -48,6 +52,7 @@
#define AS3935_DATA_MASK 0x3F
#define AS3935_TUNE_CAP 0x08
+#define AS3935_DEFAULTS 0x3C
#define AS3935_CALIBRATE 0x3D
#define AS3935_READ_DATA BIT(14)
@@ -62,7 +67,9 @@ struct as3935_state {
struct mutex lock;
struct delayed_work work;
+ unsigned long noise_tripped;
u32 tune_cap;
+ u32 nflwdth_reg;
u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
u8 buf[2] ____cacheline_aligned;
};
@@ -145,12 +152,29 @@ static ssize_t as3935_sensor_sensitivity_store(struct device *dev,
return len;
}
+static ssize_t as3935_noise_level_tripped_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct as3935_state *st = iio_priv(dev_to_iio_dev(dev));
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = sprintf(buf, "%d\n", !time_after(jiffies, st->noise_tripped + HZ));
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
static IIO_DEVICE_ATTR(sensor_sensitivity, S_IRUGO | S_IWUSR,
as3935_sensor_sensitivity_show, as3935_sensor_sensitivity_store, 0);
+static IIO_DEVICE_ATTR(noise_level_tripped, S_IRUGO,
+ as3935_noise_level_tripped_show, NULL, 0);
static struct attribute *as3935_attributes[] = {
&iio_dev_attr_sensor_sensitivity.dev_attr.attr,
+ &iio_dev_attr_noise_level_tripped.dev_attr.attr,
NULL,
};
@@ -246,7 +270,11 @@ static void as3935_event_work(struct work_struct *work)
case AS3935_EVENT_INT:
iio_trigger_poll_chained(st->trig);
break;
+ case AS3935_DISTURB_INT:
case AS3935_NOISE_INT:
+ mutex_lock(&st->lock);
+ st->noise_tripped = jiffies;
+ mutex_unlock(&st->lock);
dev_warn(&st->spi->dev, "noise level is too high\n");
break;
}
@@ -269,15 +297,14 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private)
static void calibrate_as3935(struct as3935_state *st)
{
- /* mask disturber interrupt bit */
- as3935_write(st, AS3935_INT, BIT(5));
-
+ as3935_write(st, AS3935_DEFAULTS, 0x96);
as3935_write(st, AS3935_CALIBRATE, 0x96);
as3935_write(st, AS3935_TUNE_CAP,
BIT(5) | (st->tune_cap / TUNE_CAP_DIV));
mdelay(2);
as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
+ as3935_write(st, AS3935_NFLWDTH, st->nflwdth_reg);
}
#ifdef CONFIG_PM_SLEEP
@@ -370,6 +397,15 @@ static int as3935_probe(struct spi_device *spi)
return -EINVAL;
}
+ ret = of_property_read_u32(np,
+ "ams,nflwdth", &st->nflwdth_reg);
+ if (!ret && st->nflwdth_reg > AS3935_NFLWDTH_MASK) {
+ dev_err(&spi->dev,
+ "invalid nflwdth setting of %d\n",
+ st->nflwdth_reg);
+ return -EINVAL;
+ }
+
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->channels = as3935_channels;
@@ -384,6 +420,7 @@ static int as3935_probe(struct spi_device *spi)
return -ENOMEM;
st->trig = trig;
+ st->noise_tripped = jiffies - HZ;
trig->dev.parent = indio_dev->dev.parent;
iio_trigger_set_drvdata(trig, indio_dev);
trig->ops = &iio_interrupt_trigger_ops;
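
The as3935 additions above expose whether a noise or disturber interrupt fired within the last second. The work handler records jiffies when the event arrives, the sysfs show routine compares against that timestamp with the wrap-around-safe time_after(), and probe seeds the timestamp one second in the past so the attribute starts out false. A sketch of the same bookkeeping with a hypothetical tripped_at variable (kernel context, <linux/jiffies.h> assumed):

static unsigned long tripped_at;	/* initialised to jiffies - HZ at probe */

static void note_trip(void)
{
	tripped_at = jiffies;		/* called from the event work handler */
}

static bool tripped_recently(void)
{
	/* true while the recorded event is less than one second old */
	return !time_after(jiffies, tripped_at + HZ);
}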
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index 9b9053494daf..eb212f8c8879 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -174,6 +174,7 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv)
clk_disable(priv->clk);
/* Stop timer */
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
regmap_write(priv->regmap, TIM_PSC, 0);
regmap_write(priv->regmap, TIM_ARR, 0);
@@ -715,8 +716,9 @@ static ssize_t stm32_count_set_preset(struct iio_dev *indio_dev,
if (ret)
return ret;
+ /* TIMx_ARR register shouldn't be buffered (ARPE=0) */
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
regmap_write(priv->regmap, TIM_ARR, preset);
- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE);
return len;
}
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 30825bb9b8e9..8861c052155a 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -100,6 +100,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
if (ret)
goto pid_query_error;
+ nlmsg_end(skb, nlh);
+
pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
__func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
@@ -170,6 +172,8 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
&pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR);
if (ret)
goto add_mapping_error;
+
+ nlmsg_end(skb, nlh);
nlmsg_request->req_buffer = pm_msg;
ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -246,6 +250,8 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
&pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR);
if (ret)
goto query_mapping_error;
+
+ nlmsg_end(skb, nlh);
nlmsg_request->req_buffer = pm_msg;
ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -308,6 +314,8 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
if (ret)
goto remove_mapping_error;
+ nlmsg_end(skb, nlh);
+
ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index c81c55942626..3c4faadb8cdd 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -597,6 +597,9 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
&mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
if (ret)
goto mapinfo_num_error;
+
+ nlmsg_end(skb, nlh);
+
ret = rdma_nl_unicast(skb, iwpm_pid);
if (ret) {
skb = NULL;
@@ -678,6 +681,8 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
if (ret)
goto send_mapping_info_unlock;
+ nlmsg_end(skb, nlh);
+
iwpm_print_sockaddr(&map_info->local_sockaddr,
"send_mapping_info: Local sockaddr:");
iwpm_print_sockaddr(&map_info->mapped_sockaddr,
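
The iwpm hunks above all add the same missing step: a netlink message built with nlmsg_put() plus nla_put_*() has to be finalised with nlmsg_end() so nlmsg_len covers the attributes before the skb is handed to the send path. A hedged sketch of that shape; the message type, attribute id and error handling are placeholders rather than the iwpm protocol definitions:

static int build_and_send(struct sk_buff *skb, u32 portid, u32 val)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, 0 /* msg type */, 0, 0);
	if (!nlh)
		return -EMSGSIZE;

	if (nla_put_u32(skb, 1 /* attr type */, val))
		return -EMSGSIZE;

	nlmsg_end(skb, nlh);	/* the step the hunks above add before sending */

	return rdma_nl_unicast(skb, portid);
}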
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 70ad19c4c73e..88bdafb297f5 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -432,8 +432,10 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
atomic_set(&qp->qp_sec->error_list_count, 0);
init_completion(&qp->qp_sec->error_complete);
ret = security_ib_alloc_security(&qp->qp_sec->security);
- if (ret)
+ if (ret) {
kfree(qp->qp_sec);
+ qp->qp_sec = NULL;
+ }
return ret;
}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 4ab30d832ac5..52a2cf2d83aa 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3869,15 +3869,15 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
resp.raw_packet_caps = attr.raw_packet_caps;
resp.response_length += sizeof(resp.raw_packet_caps);
- if (ucore->outlen < resp.response_length + sizeof(resp.xrq_caps))
+ if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
goto end;
- resp.xrq_caps.max_rndv_hdr_size = attr.xrq_caps.max_rndv_hdr_size;
- resp.xrq_caps.max_num_tags = attr.xrq_caps.max_num_tags;
- resp.xrq_caps.max_ops = attr.xrq_caps.max_ops;
- resp.xrq_caps.max_sge = attr.xrq_caps.max_sge;
- resp.xrq_caps.flags = attr.xrq_caps.flags;
- resp.response_length += sizeof(resp.xrq_caps);
+ resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
+ resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
+ resp.tm_caps.max_ops = attr.tm_caps.max_ops;
+ resp.tm_caps.max_sge = attr.tm_caps.max_sge;
+ resp.tm_caps.flags = attr.tm_caps.flags;
+ resp.response_length += sizeof(resp.tm_caps);
end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
return err;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index ee9e27dc799b..de57d6c11a25 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1646,7 +1646,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
*/
if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
if (attr.qp_state >= IB_QPS_INIT) {
- if (qp->device->get_link_layer(qp->device, attr.port_num) !=
+ if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
IB_LINK_LAYER_INFINIBAND)
return true;
goto lid_check;
@@ -1655,7 +1655,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
/* Can't get a quick answer, iterate over all ports */
for (port = 0; port < qp->device->phys_port_cnt; port++)
- if (qp->device->get_link_layer(qp->device, port) !=
+ if (rdma_port_get_link_layer(qp->device, port) !=
IB_LINK_LAYER_INFINIBAND)
num_eth_ports++;
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index b3ad37fec578..ecbac91b2e14 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -93,11 +93,13 @@ struct bnxt_re_dev {
struct ib_device ibdev;
struct list_head list;
unsigned long flags;
-#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
-#define BNXT_RE_FLAG_IBDEV_REGISTERED 1
-#define BNXT_RE_FLAG_GOT_MSIX 2
-#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 8
-#define BNXT_RE_FLAG_QOS_WORK_REG 16
+#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
+#define BNXT_RE_FLAG_IBDEV_REGISTERED 1
+#define BNXT_RE_FLAG_GOT_MSIX 2
+#define BNXT_RE_FLAG_HAVE_L2_REF 3
+#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
+#define BNXT_RE_FLAG_QOS_WORK_REG 5
+#define BNXT_RE_FLAG_TASK_IN_PROG 6
struct net_device *netdev;
unsigned int version, major, minor;
struct bnxt_en_dev *en_dev;
@@ -108,6 +110,8 @@ struct bnxt_re_dev {
struct delayed_work worker;
u8 cur_prio_map;
+ u8 active_speed;
+ u8 active_width;
/* FP Notification Queue (CQ & SRQ) */
struct tasklet_struct nq_task;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 01eee15bbd65..0d89621d9fe8 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -259,14 +259,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
port_attr->sm_sl = 0;
port_attr->subnet_timeout = 0;
port_attr->init_type_reply = 0;
- /* call the underlying netdev's ethtool hooks to query speed settings
- * for which we acquire rtnl_lock _only_ if it's registered with
- * IB stack to avoid race in the NETDEV_UNREG path
- */
- if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
- if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed,
- &port_attr->active_width))
- return -EINVAL;
+ port_attr->active_speed = rdev->active_speed;
+ port_attr->active_width = rdev->active_width;
+
return 0;
}
@@ -319,6 +314,7 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+ struct bnxt_qplib_gid *gid_to_del;
/* Delete the entry from the hardware */
ctx = *context;
@@ -328,11 +324,25 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
if (sgid_tbl && sgid_tbl->active) {
if (ctx->idx >= sgid_tbl->max)
return -EINVAL;
+ gid_to_del = &sgid_tbl->tbl[ctx->idx];
+ /* DEL_GID is called in WQ context(netdevice_event_work_handler)
+ * or via the ib_unregister_device path. In the former case QP1
+ * may not be destroyed yet, in which case just return as FW
+ * needs that entry to be present and will fail its deletion.
+ * We could get invoked again after QP1 is destroyed OR get an
+ * ADD_GID call with a different GID value for the same index
+ * where we issue MODIFY_GID cmd to update the GID entry -- TBD
+ */
+ if (ctx->idx == 0 &&
+ rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
+ ctx->refcnt == 1 && rdev->qp1_sqp) {
+ dev_dbg(rdev_to_dev(rdev),
+ "Trying to delete GID0 while QP1 is alive\n");
+ return -EFAULT;
+ }
ctx->refcnt--;
if (!ctx->refcnt) {
- rc = bnxt_qplib_del_sgid(sgid_tbl,
- &sgid_tbl->tbl[ctx->idx],
- true);
+ rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to remove GID: %#x", rc);
@@ -816,6 +826,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
kfree(rdev->sqp_ah);
kfree(rdev->qp1_sqp);
+ rdev->qp1_sqp = NULL;
+ rdev->sqp_ah = NULL;
}
if (!IS_ERR_OR_NULL(qp->rumem))
@@ -1436,11 +1448,14 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
+ qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
} else if (qp_attr->qp_state == IB_QPS_RTR) {
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu =
__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
+ qp->qplib_qp.mtu =
+ ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
}
if (qp_attr_mask & IB_QP_TIMEOUT) {
@@ -1551,43 +1566,46 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_re_dev *rdev = qp->rdev;
- struct bnxt_qplib_qp qplib_qp;
+ struct bnxt_qplib_qp *qplib_qp;
int rc;
- memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp));
- qplib_qp.id = qp->qplib_qp.id;
- qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
+ qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
+ if (!qplib_qp)
+ return -ENOMEM;
+
+ qplib_qp->id = qp->qplib_qp.id;
+ qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
- rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp);
+ rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
- return rc;
+ goto out;
}
- qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state);
- qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0;
- qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access);
- qp_attr->pkey_index = qplib_qp.pkey_index;
- qp_attr->qkey = qplib_qp.qkey;
+ qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
+ qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
+ qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
+ qp_attr->pkey_index = qplib_qp->pkey_index;
+ qp_attr->qkey = qplib_qp->qkey;
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
- rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label,
- qplib_qp.ah.host_sgid_index,
- qplib_qp.ah.hop_limit,
- qplib_qp.ah.traffic_class);
- rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data);
- rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl);
- ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac);
- qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu);
- qp_attr->timeout = qplib_qp.timeout;
- qp_attr->retry_cnt = qplib_qp.retry_cnt;
- qp_attr->rnr_retry = qplib_qp.rnr_retry;
- qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer;
- qp_attr->rq_psn = qplib_qp.rq.psn;
- qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic;
- qp_attr->sq_psn = qplib_qp.sq.psn;
- qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic;
- qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR :
- IB_SIGNAL_REQ_WR;
- qp_attr->dest_qp_num = qplib_qp.dest_qpn;
+ rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
+ qplib_qp->ah.host_sgid_index,
+ qplib_qp->ah.hop_limit,
+ qplib_qp->ah.traffic_class);
+ rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
+ rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
+ ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
+ qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
+ qp_attr->timeout = qplib_qp->timeout;
+ qp_attr->retry_cnt = qplib_qp->retry_cnt;
+ qp_attr->rnr_retry = qplib_qp->rnr_retry;
+ qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
+ qp_attr->rq_psn = qplib_qp->rq.psn;
+ qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
+ qp_attr->sq_psn = qplib_qp->sq.psn;
+ qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
+ qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
+ IB_SIGNAL_REQ_WR;
+ qp_attr->dest_qp_num = qplib_qp->dest_qpn;
qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
@@ -1596,7 +1614,9 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
qp_init_attr->cap = qp_attr->cap;
- return 0;
+out:
+ kfree(qplib_qp);
+ return rc;
}
/* Routine for sending QP1 packets for RoCE V1 and V2
@@ -1908,6 +1928,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
+ wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
wqe->atomic.swap_data = atomic_wr(wr)->swap;
break;
case IB_WR_ATOMIC_FETCH_AND_ADD:
@@ -3062,7 +3083,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
return rc;
}
- if (mr->npages && mr->pages) {
+ if (mr->pages) {
rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
&mr->qplib_frpl);
kfree(mr->pages);
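
The bnxt_re_query_qp() rework above moves a large struct bnxt_qplib_qp off the kernel stack onto the heap and funnels every exit through one label that frees it. A sketch of the pattern with hypothetical types (struct ctx, struct big_state and query_hw() are stand-ins; <linux/slab.h> assumed):

static int query_example(struct ctx *ctx, u32 *value_out)
{
	struct big_state *st;
	int rc;

	st = kzalloc(sizeof(*st), GFP_KERNEL);	/* too big for the stack */
	if (!st)
		return -ENOMEM;

	rc = query_hw(ctx, st);
	if (rc)
		goto out;			/* single exit frees st */

	*value_out = st->value;
out:
	kfree(st);
	return rc;
}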
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 82d1cbc27aee..e7450ea92aa9 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1161,6 +1161,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
}
}
set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
+ ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
+ &rdev->active_width);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
@@ -1255,10 +1257,14 @@ static void bnxt_re_task(struct work_struct *work)
else if (netif_carrier_ok(rdev->netdev))
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
IB_EVENT_PORT_ACTIVE);
+ ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
+ &rdev->active_width);
break;
default:
break;
}
+ smp_mb__before_atomic();
+ clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
kfree(re_work);
}
@@ -1317,6 +1323,11 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
break;
case NETDEV_UNREGISTER:
+ /* netdev notifier will call NETDEV_UNREGISTER again later since
+ * we are still holding the reference to the netdev
+ */
+ if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags))
+ goto exit;
bnxt_re_ib_unreg(rdev, false);
bnxt_re_remove_one(rdev);
bnxt_re_dev_unreg(rdev);
@@ -1335,6 +1346,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
re_work->vlan_dev = (real_dev == netdev ?
NULL : netdev);
INIT_WORK(&re_work->work, bnxt_re_task);
+ set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
queue_work(bnxt_re_wq, &re_work->work);
}
}
@@ -1375,6 +1387,22 @@ err_netdev:
static void __exit bnxt_re_mod_exit(void)
{
+ struct bnxt_re_dev *rdev;
+ LIST_HEAD(to_be_deleted);
+
+ mutex_lock(&bnxt_re_dev_lock);
+ /* Free all adapter allocated resources */
+ if (!list_empty(&bnxt_re_dev_list))
+ list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
+ mutex_unlock(&bnxt_re_dev_lock);
+
+ list_for_each_entry(rdev, &to_be_deleted, list) {
+ dev_info(rdev_to_dev(rdev), "Unregistering Device");
+ bnxt_re_dev_stop(rdev);
+ bnxt_re_ib_unreg(rdev, true);
+ bnxt_re_remove_one(rdev);
+ bnxt_re_dev_unreg(rdev);
+ }
unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
if (bnxt_re_wq)
destroy_workqueue(bnxt_re_wq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 391bb7006e8f..2bdb1562bd21 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -107,6 +107,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
return -EINVAL;
}
+ if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
+ return -ETIMEDOUT;
+
/* Cmdq are in 16-byte units, each request can consume 1 or more
* cmdqe
*/
@@ -226,6 +229,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
/* timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
+ set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
return rc;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 0ed312f17c8d..85b16da287f9 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -162,8 +162,9 @@ struct bnxt_qplib_rcfw {
unsigned long *cmdq_bitmap;
u32 bmap_size;
unsigned long flags;
-#define FIRMWARE_INITIALIZED_FLAG 1
+#define FIRMWARE_INITIALIZED_FLAG BIT(0)
#define FIRMWARE_FIRST_FLAG BIT(31)
+#define FIRMWARE_TIMED_OUT BIT(3)
wait_queue_head_t waitq;
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
struct creq_func_event *);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index ceaa2fa54d32..daf7a56e5d7e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2333,9 +2333,14 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int stid = GET_TID(rpl);
struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
+ if (!ep) {
+ pr_debug("%s stid %d lookup failure!\n", __func__, stid);
+ goto out;
+ }
pr_debug("%s ep %p\n", __func__, ep);
c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
c4iw_put_ep(&ep->com);
+out:
return 0;
}
@@ -2594,9 +2599,9 @@ fail:
c4iw_put_ep(&child_ep->com);
reject:
reject_cr(dev, hwtid, skb);
+out:
if (parent_ep)
c4iw_put_ep(&parent_ep->com);
-out:
return 0;
}
@@ -3457,7 +3462,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->provider_data = ep;
goto out;
}
-
+ remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family);
fail2:
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index b2ed4b9cda6e..0be42787759f 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1066,6 +1066,8 @@ static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);
static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
+static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
+ int msecs);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
@@ -8238,6 +8240,7 @@ static irqreturn_t general_interrupt(int irq, void *data)
u64 regs[CCE_NUM_INT_CSRS];
u32 bit;
int i;
+ irqreturn_t handled = IRQ_NONE;
this_cpu_inc(*dd->int_counter);
@@ -8258,9 +8261,10 @@ static irqreturn_t general_interrupt(int irq, void *data)
for_each_set_bit(bit, (unsigned long *)&regs[0],
CCE_NUM_INT_CSRS * 64) {
is_interrupt(dd, bit);
+ handled = IRQ_HANDLED;
}
- return IRQ_HANDLED;
+ return handled;
}
static irqreturn_t sdma_interrupt(int irq, void *data)
@@ -9413,7 +9417,7 @@ static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
}
-void reset_qsfp(struct hfi1_pportdata *ppd)
+int reset_qsfp(struct hfi1_pportdata *ppd)
{
struct hfi1_devdata *dd = ppd->dd;
u64 mask, qsfp_mask;
@@ -9443,6 +9447,13 @@ void reset_qsfp(struct hfi1_pportdata *ppd)
* for alarms and warnings
*/
set_qsfp_int_n(ppd, 1);
+
+ /*
+ * After the reset, AOC transmitters are enabled by default. They need
+ * to be turned off to complete the QSFP setup before they can be
+ * enabled again.
+ */
+ return set_qsfp_tx(ppd, 0);
}
static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
@@ -10305,6 +10316,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
{
struct hfi1_devdata *dd = ppd->dd;
u32 previous_state;
+ int offline_state_ret;
int ret;
update_lcb_cache(dd);
@@ -10326,28 +10338,11 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
ppd->offline_disabled_reason =
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
- /*
- * Wait for offline transition. It can take a while for
- * the link to go down.
- */
- ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
- if (ret < 0)
- return ret;
-
- /*
- * Now in charge of LCB - must be after the physical state is
- * offline.quiet and before host_link_state is changed.
- */
- set_host_lcb_access(dd);
- write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
-
- /* make sure the logical state is also down */
- ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
- if (ret)
- force_logical_link_state_down(ppd);
-
- ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+ offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
+ if (offline_state_ret < 0)
+ return offline_state_ret;
+ /* Disabling AOC transmitters */
if (ppd->port_type == PORT_TYPE_QSFP &&
ppd->qsfp_info.limiting_active &&
qsfp_mod_present(ppd)) {
@@ -10365,6 +10360,30 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
}
/*
+ * Wait for the offline.Quiet transition if it hasn't happened yet. It
+ * can take a while for the link to go down.
+ */
+ if (offline_state_ret != PLS_OFFLINE_QUIET) {
+ ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Now in charge of LCB - must be after the physical state is
+ * offline.quiet and before host_link_state is changed.
+ */
+ set_host_lcb_access(dd);
+ write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
+
+ /* make sure the logical state is also down */
+ ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
+ if (ret)
+ force_logical_link_state_down(ppd);
+
+ ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+
+ /*
* The LNI has a mandatory wait time after the physical state
* moves to Offline.Quiet. The wait time may be different
* depending on how the link went down. The 8051 firmware
@@ -10396,6 +10415,9 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
/* went down while attempting link up */
check_lni_states(ppd);
+
+ /* The QSFP doesn't need to be reset on LNI failure */
+ ppd->qsfp_info.reset_needed = 0;
}
/* the active link width (downgrade) is 0 on link down */
@@ -12804,6 +12826,39 @@ static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
return 0;
}
+/*
+ * wait_phys_link_offline_substates - wait for any offline substate
+ * @ppd: port device
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for any offline physical link
+ * state change to occur.
+ * Returns the reached physical state on success, otherwise -ETIMEDOUT.
+ */
+static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
+ int msecs)
+{
+ u32 read_state;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(msecs);
+ while (1) {
+ read_state = read_physical_state(ppd->dd);
+ if ((read_state & 0xF0) == PLS_OFFLINE)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(ppd->dd,
+ "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
+ read_state, msecs);
+ return -ETIMEDOUT;
+ }
+ usleep_range(1950, 2050); /* sleep 2ms-ish */
+ }
+
+ log_state_transition(ppd, read_state);
+ return read_state;
+}
+
#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
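
wait_phys_link_offline_substates() added above is the usual bounded polling loop: compute a deadline once, poll the hardware, sleep briefly between reads, and give up with -ETIMEDOUT when the deadline passes. A skeleton of that loop; read_hw_state() and state_is_done() are placeholders for the chip-specific pieces:

static int poll_until_done(int msecs)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(msecs);
	u32 state;

	while (1) {
		state = read_hw_state();
		if (state_is_done(state))
			return state;		/* report what was reached */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		usleep_range(1950, 2050);	/* roughly 2 ms between reads */
	}
}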
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index b8345a60a0fb..50b8645d0b87 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -204,6 +204,7 @@
#define PLS_OFFLINE_READY_TO_QUIET_LT 0x92
#define PLS_OFFLINE_REPORT_FAILURE 0x93
#define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94
+#define PLS_OFFLINE_QUIET_DURATION 0x95
#define PLS_POLLING 0x20
#define PLS_POLLING_QUIET 0x20
#define PLS_POLLING_ACTIVE 0x21
@@ -722,7 +723,7 @@ void handle_link_downgrade(struct work_struct *work);
void handle_link_bounce(struct work_struct *work);
void handle_start_link(struct work_struct *work);
void handle_sma_message(struct work_struct *work);
-void reset_qsfp(struct hfi1_pportdata *ppd);
+int reset_qsfp(struct hfi1_pportdata *ppd);
void qsfp_event(struct work_struct *work);
void start_freeze_handling(struct hfi1_pportdata *ppd, int flags);
int send_idle_sma(struct hfi1_devdata *dd, u64 message);
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c
index d46b17107901..1613af1c58d9 100644
--- a/drivers/infiniband/hw/hfi1/eprom.c
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -204,7 +204,10 @@ done_asic:
return ret;
}
-/* magic character sequence that trails an image */
+/* magic character sequence that begins an image */
+#define IMAGE_START_MAGIC "APO="
+
+/* magic character sequence that might trail an image */
#define IMAGE_TRAIL_MAGIC "egamiAPO"
/* EPROM file types */
@@ -250,6 +253,7 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
{
void *buffer;
void *p;
+ u32 length;
int ret;
buffer = kmalloc(P1_SIZE, GFP_KERNEL);
@@ -262,15 +266,21 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
return ret;
}
- /* scan for image magic that may trail the actual data */
- p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
- if (!p) {
+ /* config partition is valid only if it starts with IMAGE_START_MAGIC */
+ if (memcmp(buffer, IMAGE_START_MAGIC, strlen(IMAGE_START_MAGIC))) {
kfree(buffer);
return -ENOENT;
}
+ /* scan for image magic that may trail the actual data */
+ p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
+ if (p)
+ length = p - buffer;
+ else
+ length = P1_SIZE;
+
*data = buffer;
- *size = p - buffer;
+ *size = length;
return 0;
}
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 2bc89260235a..d9a1e9893136 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -930,15 +930,8 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
switch (ret) {
case 0:
ret = setup_base_ctxt(fd, uctxt);
- if (uctxt->subctxt_cnt) {
- /*
- * Base context is done (successfully or not), notify
- * anybody using a sub-context that is waiting for
- * this completion.
- */
- clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
- wake_up(&uctxt->wait);
- }
+ if (ret)
+ deallocate_ctxt(uctxt);
break;
case 1:
ret = complete_subctxt(fd);
@@ -1305,25 +1298,25 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
/* Now allocate the RcvHdr queue and eager buffers. */
ret = hfi1_create_rcvhdrq(dd, uctxt);
if (ret)
- return ret;
+ goto done;
ret = hfi1_setup_eagerbufs(uctxt);
if (ret)
- goto setup_failed;
+ goto done;
/* If sub-contexts are enabled, do the appropriate setup */
if (uctxt->subctxt_cnt)
ret = setup_subctxt(uctxt);
if (ret)
- goto setup_failed;
+ goto done;
ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
if (ret)
- goto setup_failed;
+ goto done;
ret = init_user_ctxt(fd, uctxt);
if (ret)
- goto setup_failed;
+ goto done;
user_init(uctxt);
@@ -1331,12 +1324,22 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
fd->uctxt = uctxt;
hfi1_rcd_get(uctxt);
- return 0;
+done:
+ if (uctxt->subctxt_cnt) {
+ /*
+ * On error, set the failed bit so sub-contexts will clean up
+ * correctly.
+ */
+ if (ret)
+ set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
-setup_failed:
- /* Set the failed bit so sub-context init can do the right thing */
- set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
- deallocate_ctxt(uctxt);
+ /*
+ * Base context is done (successfully or not), notify anybody
+ * using a sub-context that is waiting for this completion.
+ */
+ clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
+ wake_up(&uctxt->wait);
+ }
return ret;
}
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 82447b7cdda1..09e50fd2a08f 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -68,7 +68,7 @@
/*
* Code to adjust PCIe capabilities.
*/
-static int tune_pcie_caps(struct hfi1_devdata *);
+static void tune_pcie_caps(struct hfi1_devdata *);
/*
* Do all the common PCIe setup and initialization.
@@ -351,7 +351,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
*/
int request_msix(struct hfi1_devdata *dd, u32 msireq)
{
- int nvec, ret;
+ int nvec;
nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq,
PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
@@ -360,12 +360,7 @@ int request_msix(struct hfi1_devdata *dd, u32 msireq)
return nvec;
}
- ret = tune_pcie_caps(dd);
- if (ret) {
- dd_dev_err(dd, "tune_pcie_caps() failed: %d\n", ret);
- pci_free_irq_vectors(dd->pcidev);
- return ret;
- }
+ tune_pcie_caps(dd);
/* check for legacy IRQ */
if (nvec == 1 && !dd->pcidev->msix_enabled)
@@ -502,7 +497,7 @@ uint aspm_mode = ASPM_MODE_DISABLED;
module_param_named(aspm, aspm_mode, uint, S_IRUGO);
MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
-static int tune_pcie_caps(struct hfi1_devdata *dd)
+static void tune_pcie_caps(struct hfi1_devdata *dd)
{
struct pci_dev *parent;
u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
@@ -513,22 +508,14 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
* Turn on extended tags in DevCtl in case the BIOS has turned it off
* to improve WFR SDMA bandwidth
*/
- ret = pcie_capability_read_word(dd->pcidev,
- PCI_EXP_DEVCTL, &ectl);
- if (ret) {
- dd_dev_err(dd, "Unable to read from PCI config\n");
- return ret;
- }
-
- if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
+ ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
+ if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
dd_dev_info(dd, "Enabling PCIe extended tags\n");
ectl |= PCI_EXP_DEVCTL_EXT_TAG;
ret = pcie_capability_write_word(dd->pcidev,
PCI_EXP_DEVCTL, ectl);
- if (ret) {
- dd_dev_err(dd, "Unable to write to PCI config\n");
- return ret;
- }
+ if (ret)
+ dd_dev_info(dd, "Unable to write to PCI config\n");
}
/* Find out supported and configured values for parent (root) */
parent = dd->pcidev->bus->self;
@@ -536,15 +523,22 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
* The driver cannot perform the tuning if it does not have
* access to the upstream component.
*/
- if (!parent)
- return -EINVAL;
+ if (!parent) {
+ dd_dev_info(dd, "Parent not found\n");
+ return;
+ }
if (!pci_is_root_bus(parent->bus)) {
dd_dev_info(dd, "Parent not root\n");
- return -EINVAL;
+ return;
+ }
+ if (!pci_is_pcie(parent)) {
+ dd_dev_info(dd, "Parent is not PCI Express capable\n");
+ return;
+ }
+ if (!pci_is_pcie(dd->pcidev)) {
+ dd_dev_info(dd, "PCI device is not PCI Express capable\n");
+ return;
}
-
- if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
- return -EINVAL;
rc_mpss = parent->pcie_mpss;
rc_mps = ffs(pcie_get_mps(parent)) - 8;
/* Find out supported and configured values for endpoint (us) */
@@ -590,8 +584,6 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
ep_mrrs = max_mrrs;
pcie_set_readrq(dd->pcidev, ep_mrrs);
}
-
- return 0;
}
/* End of PCIe capability tuning */
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index a8af96d2b1b0..d486355880cb 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -790,7 +790,9 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
* reuse of stale settings established in our previous pass through.
*/
if (ppd->qsfp_info.reset_needed) {
- reset_qsfp(ppd);
+ ret = reset_qsfp(ppd);
+ if (ret)
+ return ret;
refresh_qsfp_cache(ppd, &ppd->qsfp_info);
} else {
ppd->qsfp_info.reset_needed = 1;
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 9b1566468744..a65e4cbdce2f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -201,7 +201,6 @@ enum init_completion_state {
CEQ_CREATED,
ILQ_CREATED,
IEQ_CREATED,
- INET_NOTIFIER,
IP_ADDR_REGISTERED,
RDMA_DEV_REGISTERED
};
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 14f36ba4e5be..5230dd3c938c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1504,23 +1504,40 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
}
/**
- * listen_port_in_use - determine if port is in use
- * @port: Listen port number
+ * i40iw_port_in_use - determine if port is in use
+ * @port: port number
+ * @active_side: true when checking the active (connecting) side, false for listeners
*/
-static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
+static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool active_side)
{
struct i40iw_cm_listener *listen_node;
+ struct i40iw_cm_node *cm_node;
unsigned long flags;
bool ret = false;
- spin_lock_irqsave(&cm_core->listen_list_lock, flags);
- list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
- if (listen_node->loc_port == port) {
- ret = true;
- break;
+ if (active_side) {
+ /* search connected node list */
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ list_for_each_entry(cm_node, &cm_core->connected_nodes, list) {
+ if (cm_node->loc_port == port) {
+ ret = true;
+ break;
+ }
+ }
+ if (!ret)
+ clear_bit(port, cm_core->active_side_ports);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ } else {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+ if (listen_node->loc_port == port) {
+ ret = true;
+ break;
+ }
}
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
}
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
return ret;
}
@@ -1868,7 +1885,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
if (listener->iwdev) {
- if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port))
+ if (apbvt_del && !i40iw_port_in_use(cm_core, listener->loc_port, false))
i40iw_manage_apbvt(listener->iwdev,
listener->loc_port,
I40IW_MANAGE_APBVT_DEL);
@@ -2247,21 +2264,21 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
if (cm_node->listener) {
i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
} else {
- if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
- cm_node->apbvt_set) {
+ if (!i40iw_port_in_use(cm_core, cm_node->loc_port, true) && cm_node->apbvt_set) {
i40iw_manage_apbvt(cm_node->iwdev,
cm_node->loc_port,
I40IW_MANAGE_APBVT_DEL);
- i40iw_get_addr_info(cm_node, &nfo);
- if (cm_node->qhash_set) {
- i40iw_manage_qhash(cm_node->iwdev,
- &nfo,
- I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_DELETE,
- NULL,
- false);
- cm_node->qhash_set = 0;
- }
+ cm_node->apbvt_set = 0;
+ }
+ i40iw_get_addr_info(cm_node, &nfo);
+ if (cm_node->qhash_set) {
+ i40iw_manage_qhash(cm_node->iwdev,
+ &nfo,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
+ cm_node->qhash_set = 0;
}
}
@@ -3255,7 +3272,8 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
tcp_info->insert_vlan_tag = true;
- tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id);
+ tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) |
+ cm_node->vlan_id);
}
if (cm_node->ipv4) {
tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
@@ -3737,10 +3755,8 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct sockaddr_in *raddr;
struct sockaddr_in6 *laddr6;
struct sockaddr_in6 *raddr6;
- bool qhash_set = false;
- int apbvt_set = 0;
- int err = 0;
- enum i40iw_status_code status;
+ int ret = 0;
+ unsigned long flags;
ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
@@ -3789,32 +3805,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_info.user_pri = rt_tos2priority(cm_id->tos);
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
__func__, cm_id->tos, cm_info.user_pri);
- if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
- (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
- raddr6->sin6_addr.in6_u.u6_addr32,
- sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
- status = i40iw_manage_qhash(iwdev,
- &cm_info,
- I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_ADD,
- NULL,
- true);
- if (status)
- return -EINVAL;
- qhash_set = true;
- }
- status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD);
- if (status) {
- i40iw_manage_qhash(iwdev,
- &cm_info,
- I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_DELETE,
- NULL,
- false);
- return -EINVAL;
- }
-
- apbvt_set = 1;
cm_id->add_ref(cm_id);
cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
conn_param->private_data_len,
@@ -3822,17 +3812,40 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
&cm_info);
if (IS_ERR(cm_node)) {
- err = PTR_ERR(cm_node);
- goto err_out;
+ ret = PTR_ERR(cm_node);
+ cm_id->rem_ref(cm_id);
+ return ret;
+ }
+
+ if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
+ (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
+ raddr6->sin6_addr.in6_u.u6_addr32,
+ sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
+ if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ cm_node->qhash_set = true;
}
+ spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
+ if (!test_and_set_bit(cm_info.loc_port, iwdev->cm_core.active_side_ports)) {
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ } else {
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ }
+
+ cm_node->apbvt_set = true;
i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
!cm_node->ord_size)
cm_node->ord_size = 1;
- cm_node->apbvt_set = apbvt_set;
- cm_node->qhash_set = qhash_set;
iwqp->cm_node = cm_node;
cm_node->iwqp = iwqp;
iwqp->cm_id = cm_id;
@@ -3840,11 +3853,9 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
cm_node->state = I40IW_CM_STATE_SYN_SENT;
- err = i40iw_send_syn(cm_node, 0);
- if (err) {
- i40iw_rem_ref_cm_node(cm_node);
- goto err_out;
- }
+ ret = i40iw_send_syn(cm_node, 0);
+ if (ret)
+ goto err;
}
i40iw_debug(cm_node->dev,
@@ -3853,9 +3864,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->rem_port,
cm_node,
cm_node->cm_id);
+
return 0;
-err_out:
+err:
if (cm_info.ipv4)
i40iw_debug(&iwdev->sc_dev,
I40IW_DEBUG_CM,
@@ -3867,22 +3879,10 @@ err_out:
"Api - connect() FAILED: dest addr=%pI6",
cm_info.rem_addr);
- if (qhash_set)
- i40iw_manage_qhash(iwdev,
- &cm_info,
- I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_DELETE,
- NULL,
- false);
-
- if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
- cm_info.loc_port))
- i40iw_manage_apbvt(iwdev,
- cm_info.loc_port,
- I40IW_MANAGE_APBVT_DEL);
+ i40iw_rem_ref_cm_node(cm_node);
cm_id->rem_ref(cm_id);
iwdev->cm_core.stats_connect_errs++;
- return err;
+ return ret;
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 2e52e38ffcf3..45abef76295b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -71,6 +71,9 @@
#define I40IW_HW_IRD_SETTING_32 32
#define I40IW_HW_IRD_SETTING_64 64
+#define MAX_PORTS 65536
+#define I40IW_VLAN_PRIO_SHIFT 13
+
enum ietf_mpa_flags {
IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
IETF_MPA_FLAGS_CRC = 0x40, /* receive Markers */
@@ -411,6 +414,8 @@ struct i40iw_cm_core {
spinlock_t ht_lock; /* manage hash table */
spinlock_t listen_list_lock; /* listen list */
+ unsigned long active_side_ports[BITS_TO_LONGS(MAX_PORTS)];
+
u64 stats_nodes_created;
u64 stats_nodes_destroyed;
u64 stats_listen_created;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index d1f5345f04f0..42ca5346777d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -48,7 +48,7 @@
* @wqe: cqp wqe for header
* @header: header for the cqp wqe
*/
-static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
+void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
{
wmb(); /* make sure WQE is populated before polarity is set */
set_64bit_val(wqe, 24, header);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index cc742c3132c6..27590ae21881 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -99,8 +99,6 @@ static struct notifier_block i40iw_net_notifier = {
.notifier_call = i40iw_net_event
};
-static atomic_t i40iw_notifiers_registered;
-
/**
* i40iw_find_i40e_handler - find a handler given a client info
* @ldev: pointer to a client info
@@ -1376,11 +1374,20 @@ error:
*/
static void i40iw_register_notifiers(void)
{
- if (atomic_inc_return(&i40iw_notifiers_registered) == 1) {
- register_inetaddr_notifier(&i40iw_inetaddr_notifier);
- register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
- register_netevent_notifier(&i40iw_net_notifier);
- }
+ register_inetaddr_notifier(&i40iw_inetaddr_notifier);
+ register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
+ register_netevent_notifier(&i40iw_net_notifier);
+}
+
+/**
+ * i40iw_unregister_notifiers - unregister tcp ip notifiers
+ */
+
+static void i40iw_unregister_notifiers(void)
+{
+ unregister_netevent_notifier(&i40iw_net_notifier);
+ unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
+ unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
}
/**
@@ -1400,6 +1407,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
u32 i;
u32 size;
+ if (!ldev->msix_count) {
+ i40iw_pr_err("No MSI-X vectors\n");
+ return I40IW_ERR_CONFIG;
+ }
+
iwdev->msix_count = ldev->msix_count;
size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
@@ -1462,12 +1474,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev)
if (!iwdev->reset)
i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
/* fallthrough */
- case INET_NOTIFIER:
- if (!atomic_dec_return(&i40iw_notifiers_registered)) {
- unregister_netevent_notifier(&i40iw_net_notifier);
- unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
- unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
- }
/* fallthrough */
case PBLE_CHUNK_MEM:
i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
@@ -1550,7 +1556,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
status = i40iw_save_msix_info(iwdev, ldev);
if (status)
- goto exit;
+ return status;
iwdev->hw.dev_context = (void *)ldev->pcidev;
iwdev->hw.hw_addr = ldev->hw_addr;
status = i40iw_allocate_dma_mem(&iwdev->hw,
@@ -1667,8 +1673,6 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
break;
iwdev->init_state = PBLE_CHUNK_MEM;
iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
- i40iw_register_notifiers();
- iwdev->init_state = INET_NOTIFIER;
status = i40iw_add_mac_ip(iwdev);
if (status)
break;
@@ -2018,6 +2022,8 @@ static int __init i40iw_init_module(void)
i40iw_client.type = I40E_CLIENT_IWARP;
spin_lock_init(&i40iw_handler_lock);
ret = i40e_register_client(&i40iw_client);
+ i40iw_register_notifiers();
+
return ret;
}
@@ -2029,6 +2035,7 @@ static int __init i40iw_init_module(void)
*/
static void __exit i40iw_exit_module(void)
{
+ i40iw_unregister_notifiers();
i40e_unregister_client(&i40iw_client);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index e217a1259f57..5498ad01c280 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -59,6 +59,8 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
struct i40iw_fast_reg_stag_info *info,
bool post_sq);
+void i40iw_insert_wqe_hdr(u64 *wqe, u64 header);
+
/* HMC/FPM functions */
enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
u8 hmc_fn_id);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index c2cab20c4bc5..59f70676f0e0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -123,12 +123,11 @@ static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
get_64bit_val(wqe, 24, &offset24);
offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
- set_64bit_val(wqe, 24, offset24);
set_64bit_val(wqe, 0, buf->mem.pa);
set_64bit_val(wqe, 8,
LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
- set_64bit_val(wqe, 24, offset24);
+ i40iw_insert_wqe_hdr(wqe, offset24);
}
/**
@@ -409,9 +408,7 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
set_64bit_val(wqe, 16, header[0]);
- /* Ensure all data is written before writing valid bit */
- wmb();
- set_64bit_val(wqe, 24, header[1]);
+ i40iw_insert_wqe_hdr(wqe, header[1]);
i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
i40iw_qp_post_wr(&qp->qp_uk);
@@ -539,7 +536,7 @@ static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct
LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
- set_64bit_val(wqe, 24, header);
+ i40iw_insert_wqe_hdr(wqe, header);
i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
i40iw_sc_cqp_post_sq(cqp);
@@ -655,7 +652,7 @@ static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct
LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
- set_64bit_val(wqe, 24, header);
+ i40iw_insert_wqe_hdr(wqe, header);
i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
wqe, I40IW_CQP_WQE_SIZE * 8);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 62f1f45b8737..e52dbbb4165e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -160,7 +160,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
return NOTIFY_DONE;
iwdev = &hdl->device;
- if (iwdev->init_state < INET_NOTIFIER)
+ if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
return NOTIFY_DONE;
netdev = iwdev->ldev->netdev;
@@ -217,7 +217,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
return NOTIFY_DONE;
iwdev = &hdl->device;
- if (iwdev->init_state < INET_NOTIFIER)
+ if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
return NOTIFY_DONE;
netdev = iwdev->ldev->netdev;
@@ -266,7 +266,7 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
if (!iwhdl)
return NOTIFY_DONE;
iwdev = &iwhdl->device;
- if (iwdev->init_state < INET_NOTIFIER)
+ if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
return NOTIFY_DONE;
p = (__be32 *)neigh->primary_key;
i40iw_copy_ip_ntohl(local_ipaddr, p);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 1aa411034a27..62be0a41ad0b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -826,12 +826,14 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ attr->port_num = 1;
init_attr->event_handler = iwqp->ibqp.event_handler;
init_attr->qp_context = iwqp->ibqp.qp_context;
init_attr->send_cq = iwqp->ibqp.send_cq;
init_attr->recv_cq = iwqp->ibqp.recv_cq;
init_attr->srq = iwqp->ibqp.srq;
init_attr->cap = attr->cap;
+ init_attr->port_num = 1;
return 0;
}
@@ -1027,7 +1029,19 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
iwqp->last_aeq = I40IW_AE_RESET_SENT;
spin_unlock_irqrestore(&iwqp->lock, flags);
+ i40iw_cm_disconn(iwqp);
}
+ } else {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->cm_id) {
+ if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
+ iwqp->cm_id->add_ref(iwqp->cm_id);
+ i40iw_schedule_cm_timer(iwqp->cm_node,
+ (struct i40iw_puda_buf *)iwqp,
+ I40IW_TIMER_TYPE_CLOSE, 1, 0);
+ }
+ }
+ spin_unlock_irqrestore(&iwqp->lock, flags);
}
}
return 0;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ab3c562d5ba7..552f7bd4ecc3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -778,13 +778,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
if (MLX5_CAP_GEN(mdev, tag_matching)) {
- props->xrq_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
- props->xrq_caps.max_num_tags =
+ props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
+ props->tm_caps.max_num_tags =
(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
- props->xrq_caps.flags = IB_TM_CAP_RC;
- props->xrq_caps.max_ops =
+ props->tm_caps.flags = IB_TM_CAP_RC;
+ props->tm_caps.max_ops =
1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
- props->xrq_caps.max_sge = MLX5_TM_MAX_SGE;
+ props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
}
if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
@@ -3837,11 +3837,13 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
if (!dbg)
return -ENOMEM;
+ dev->delay_drop.dbg = dbg;
+
dbg->dir_debugfs =
debugfs_create_dir("delay_drop",
dev->mdev->priv.dbg_root);
if (!dbg->dir_debugfs)
- return -ENOMEM;
+ goto out_debugfs;
dbg->events_cnt_debugfs =
debugfs_create_atomic_t("num_timeout_events", 0400,
@@ -3865,8 +3867,6 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
if (!dbg->timeout_debugfs)
goto out_debugfs;
- dev->delay_drop.dbg = dbg;
-
return 0;
out_debugfs:
@@ -4174,9 +4174,9 @@ err_bfreg:
err_uar_page:
mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
-err_cnt:
- mlx5_ib_cleanup_cong_debugfs(dev);
err_cong:
+ mlx5_ib_cleanup_cong_debugfs(dev);
+err_cnt:
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
mlx5_ib_dealloc_counters(dev);
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 914f212e7ef6..f3dbd75a0a96 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -50,13 +50,9 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
{
unsigned long tmp;
unsigned long m;
- int i, k;
- u64 base = 0;
- int p = 0;
- int skip;
- int mask;
- u64 len;
- u64 pfn;
+ u64 base = ~0, p = 0;
+ u64 len, pfn;
+ int i = 0;
struct scatterlist *sg;
int entry;
unsigned long page_shift = umem->page_shift;
@@ -76,33 +72,24 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
m = find_first_bit(&tmp, BITS_PER_LONG);
if (max_page_shift)
m = min_t(unsigned long, max_page_shift - page_shift, m);
- skip = 1 << m;
- mask = skip - 1;
- i = 0;
+
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
len = sg_dma_len(sg) >> page_shift;
pfn = sg_dma_address(sg) >> page_shift;
- for (k = 0; k < len; k++) {
- if (!(i & mask)) {
- tmp = (unsigned long)pfn;
- m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
- skip = 1 << m;
- mask = skip - 1;
- base = pfn;
- p = 0;
- } else {
- if (base + p != pfn) {
- tmp = (unsigned long)p;
- m = find_first_bit(&tmp, BITS_PER_LONG);
- skip = 1 << m;
- mask = skip - 1;
- base = pfn;
- p = 0;
- }
- }
- p++;
- i++;
+ if (base + p != pfn) {
+ /* If either the offset or the new
+ * base is unaligned, update m
+ */
+ tmp = (unsigned long)(pfn | p);
+ if (!IS_ALIGNED(tmp, 1 << m))
+ m = find_first_bit(&tmp, BITS_PER_LONG);
+
+ base = pfn;
+ p = 0;
}
+
+ p += len;
+ i += len;
}
if (i) {
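The mlx5_ib_cont_pages() rewrite above derives the usable page shift from the lowest set bit of (pfn | p): whenever either the new base or the accumulated offset is misaligned with respect to the current 1 << m block, m drops to that bit position. A tiny standalone sketch of the idea (the helper name is illustrative, not the driver's):

#include <linux/bitops.h>	/* find_first_bit, BITS_PER_LONG */
#include <linux/kernel.h>	/* min */

/* Largest power-of-two exponent (capped at max_shift) that divides val. */
static unsigned long example_max_shift(unsigned long val, unsigned long max_shift)
{
	if (!val)
		return max_shift;	/* zero is aligned to any power of two */

	return min(max_shift, find_first_bit(&val, BITS_PER_LONG));
}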
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 0e2789d9bb4d..37bbc543847a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -47,7 +47,8 @@ enum {
#define MLX5_UMR_ALIGN 2048
-static int clean_mr(struct mlx5_ib_mr *mr);
+static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
@@ -1270,8 +1271,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
update_xlt_flags);
+
if (err) {
- mlx5_ib_dereg_mr(&mr->ibmr);
+ dereg_mr(dev, mr);
return ERR_PTR(err);
}
}
@@ -1356,7 +1358,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
&npages, &page_shift, &ncont, &order);
if (err < 0) {
- clean_mr(mr);
+ clean_mr(dev, mr);
return err;
}
}
@@ -1410,7 +1412,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
if (err) {
mlx5_ib_warn(dev, "Failed to rereg UMR\n");
ib_umem_release(mr->umem);
- clean_mr(mr);
+ clean_mr(dev, mr);
return err;
}
}
@@ -1469,9 +1471,8 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
}
}
-static int clean_mr(struct mlx5_ib_mr *mr)
+static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
int allocated_from_cache = mr->allocated_from_cache;
int err;
@@ -1507,10 +1508,8 @@ static int clean_mr(struct mlx5_ib_mr *mr)
return 0;
}
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
- struct mlx5_ib_mr *mr = to_mmr(ibmr);
int npages = mr->npages;
struct ib_umem *umem = mr->umem;
@@ -1539,7 +1538,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
}
#endif
- clean_mr(mr);
+ clean_mr(dev, mr);
if (umem) {
ib_umem_release(umem);
@@ -1549,6 +1548,14 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
return 0;
}
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+
+ return dereg_mr(dev, mr);
+}
+
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg)
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index f0dc5f4aa177..442b9bdc0f03 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3232,7 +3232,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
mr->ibmr.iova);
set_wqe_32bit_value(wqe->wqe_words,
NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
- mr->ibmr.length);
+ lower_32_bits(mr->ibmr.length));
set_wqe_32bit_value(wqe->wqe_words,
NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0);
set_wqe_32bit_value(wqe->wqe_words,
@@ -3274,7 +3274,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
mr->npages * 8);
nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, "
- "length: %d, rkey: %0x, pgl_paddr: %llx, "
+ "length: %lld, rkey: %0x, pgl_paddr: %llx, "
"page_list_len: %u, wqe_misc: %x\n",
(unsigned long long) mr->ibmr.iova,
mr->ibmr.length,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index dcb5942f9fb5..65b166cc7437 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -252,7 +252,10 @@ static int ocrdma_get_mbx_errno(u32 status)
case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
err_num = -EAGAIN;
break;
+ default:
+ err_num = -EFAULT;
}
+ break;
default:
err_num = -EFAULT;
}
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index b2bb42e2805d..254083b524bd 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -387,7 +387,7 @@ struct qedr_qp {
u8 wqe_size;
u8 smac[ETH_ALEN];
- u16 vlan_id;
+ u16 vlan;
int rc;
} *rqe_wr_id;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 4689e802b332..ad8965397cf7 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -105,7 +105,7 @@ void qedr_ll2_complete_rx_packet(void *cxt,
qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
-EINVAL : 0;
- qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
+ qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
/* note: length stands for data length i.e. GRH is excluded */
qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
data->length.data_length;
@@ -694,6 +694,7 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
struct qedr_cq *cq = get_qedr_cq(ibcq);
struct qedr_qp *qp = dev->gsi_qp;
unsigned long flags;
+ u16 vlan_id;
int i = 0;
spin_lock_irqsave(&cq->cq_lock, flags);
@@ -712,9 +713,14 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
wc[i].wc_flags |= IB_WC_WITH_SMAC;
- if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
+
+ vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
+ VLAN_VID_MASK;
+ if (vlan_id) {
wc[i].wc_flags |= IB_WC_WITH_VLAN;
- wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
+ wc[i].vlan_id = vlan_id;
+ wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
+ VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
qedr_inc_sw_cons(&qp->rq);
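The qedr change above stores the raw 16-bit 802.1Q TCI ("vlan") in the receive work request and only splits it into VLAN ID and priority when the GSI CQ is polled, using the standard masks from <linux/if_vlan.h>. A minimal sketch of that decomposition (the helper is hypothetical; only the masks and shift are real kernel definitions):

#include <linux/if_vlan.h>	/* VLAN_VID_MASK, VLAN_PRIO_MASK, VLAN_PRIO_SHIFT */
#include <linux/types.h>

/* Hypothetical helper: split a raw 802.1Q TCI into VLAN ID and priority. */
static void example_decode_vlan_tci(u16 tci, u16 *vlan_id, u8 *prio)
{
	*vlan_id = tci & VLAN_VID_MASK;				/* bits 0-11  */
	*prio = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;	/* bits 13-15 */
}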
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 663a0c301c43..984aa3484928 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -416,9 +416,34 @@ static inline enum ib_wc_status pvrdma_wc_status_to_ib(
return (enum ib_wc_status)status;
}
-static inline int pvrdma_wc_opcode_to_ib(int opcode)
-{
- return opcode;
+static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode)
+{
+ switch (opcode) {
+ case PVRDMA_WC_SEND:
+ return IB_WC_SEND;
+ case PVRDMA_WC_RDMA_WRITE:
+ return IB_WC_RDMA_WRITE;
+ case PVRDMA_WC_RDMA_READ:
+ return IB_WC_RDMA_READ;
+ case PVRDMA_WC_COMP_SWAP:
+ return IB_WC_COMP_SWAP;
+ case PVRDMA_WC_FETCH_ADD:
+ return IB_WC_FETCH_ADD;
+ case PVRDMA_WC_LOCAL_INV:
+ return IB_WC_LOCAL_INV;
+ case PVRDMA_WC_FAST_REG_MR:
+ return IB_WC_REG_MR;
+ case PVRDMA_WC_MASKED_COMP_SWAP:
+ return IB_WC_MASKED_COMP_SWAP;
+ case PVRDMA_WC_MASKED_FETCH_ADD:
+ return IB_WC_MASKED_FETCH_ADD;
+ case PVRDMA_WC_RECV:
+ return IB_WC_RECV;
+ case PVRDMA_WC_RECV_RDMA_WITH_IMM:
+ return IB_WC_RECV_RDMA_WITH_IMM;
+ default:
+ return IB_WC_SEND;
+ }
}
static inline int pvrdma_wc_flags_to_ib(int flags)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 14b62f7472b4..7774654c2ccb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -823,12 +823,18 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
wc->status != IB_WC_WR_FLUSH_ERR) {
struct ipoib_neigh *neigh;
- if (wc->status != IB_WC_RNR_RETRY_EXC_ERR)
- ipoib_warn(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n",
- wc->status, wr_id, wc->vendor_err);
+ /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle,
+ * so don't make waves.
+ */
+ if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
+ wc->status == IB_WC_RETRY_EXC_ERR)
+ ipoib_dbg(priv,
+ "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
+ __func__, wc->status, wr_id, wc->vendor_err);
else
- ipoib_dbg(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n",
- wc->status, wr_id, wc->vendor_err);
+ ipoib_warn(priv,
+ "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
+ __func__, wc->status, wr_id, wc->vendor_err);
spin_lock_irqsave(&priv->lock, flags);
neigh = tx->neigh;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 2e075377242e..6cd61638b441 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1000,19 +1000,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
*/
priv->dev->broadcast[8] = priv->pkey >> 8;
priv->dev->broadcast[9] = priv->pkey & 0xff;
-
- /*
- * Update the broadcast address in the priv->broadcast object,
- * in case it already exists, otherwise no one will do that.
- */
- if (priv->broadcast) {
- spin_lock_irq(&priv->lock);
- memcpy(priv->broadcast->mcmember.mgid.raw,
- priv->dev->broadcast + 4,
- sizeof(union ib_gid));
- spin_unlock_irq(&priv->lock);
- }
-
return 0;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index bac95b509a9b..dcc77014018d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2180,6 +2180,7 @@ static struct net_device *ipoib_add_port(const char *format,
{
struct ipoib_dev_priv *priv;
struct ib_port_attr attr;
+ struct rdma_netdev *rn;
int result = -ENOMEM;
priv = ipoib_intf_alloc(hca, port, format);
@@ -2279,7 +2280,8 @@ register_failed:
ipoib_dev_cleanup(priv->dev);
device_init_failed:
- free_netdev(priv->dev);
+ rn = netdev_priv(priv->dev);
+ rn->free_rdma_netdev(priv->dev);
kfree(priv);
alloc_mem_failed:
@@ -2328,7 +2330,7 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
return;
list_for_each_entry_safe(priv, tmp, dev_list, list) {
- struct rdma_netdev *rn = netdev_priv(priv->dev);
+ struct rdma_netdev *parent_rn = netdev_priv(priv->dev);
ib_unregister_event_handler(&priv->event_handler);
flush_workqueue(ipoib_workqueue);
@@ -2350,10 +2352,15 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
unregister_netdev(priv->dev);
mutex_unlock(&priv->sysfs_mutex);
- rn->free_rdma_netdev(priv->dev);
+ parent_rn->free_rdma_netdev(priv->dev);
+
+ list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
+ struct rdma_netdev *child_rn;
- list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
+ child_rn = netdev_priv(cpriv->dev);
+ child_rn->free_rdma_netdev(cpriv->dev);
kfree(cpriv);
+ }
kfree(priv);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 9927cd6b7082..55a9b71ed05a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -141,14 +141,17 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
return restart_syscall();
}
- priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
- if (!priv) {
+ if (!down_write_trylock(&ppriv->vlan_rwsem)) {
rtnl_unlock();
mutex_unlock(&ppriv->sysfs_mutex);
- return -ENOMEM;
+ return restart_syscall();
}
- down_write(&ppriv->vlan_rwsem);
+ priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
+ if (!priv) {
+ result = -ENOMEM;
+ goto out;
+ }
/*
* First ensure this isn't a duplicate. We check the parent device and
@@ -175,8 +178,11 @@ out:
rtnl_unlock();
mutex_unlock(&ppriv->sysfs_mutex);
- if (result) {
- free_netdev(priv->dev);
+ if (result && priv) {
+ struct rdma_netdev *rn;
+
+ rn = netdev_priv(priv->dev);
+ rn->free_rdma_netdev(priv->dev);
kfree(priv);
}
@@ -204,7 +210,12 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
return restart_syscall();
}
- down_write(&ppriv->vlan_rwsem);
+ if (!down_write_trylock(&ppriv->vlan_rwsem)) {
+ rtnl_unlock();
+ mutex_unlock(&ppriv->sysfs_mutex);
+ return restart_syscall();
+ }
+
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
@@ -224,7 +235,10 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
mutex_unlock(&ppriv->sysfs_mutex);
if (dev) {
- free_netdev(dev);
+ struct rdma_netdev *rn;
+
+ rn = netdev_priv(dev);
+ rn->free_rdma_netdev(priv->dev);
kfree(priv);
return 0;
}
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 9c3e9ab53a41..322209d5ff58 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -154,7 +154,7 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
int i;
- iser_err("page vec npages %d data length %d\n",
+ iser_err("page vec npages %d data length %lld\n",
page_vec->npages, page_vec->fake_mr.length);
for (i = 0; i < page_vec->npages; i++)
iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 8f2042432c85..66a46c84e28f 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -237,9 +237,15 @@ int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file)
EXPORT_SYMBOL_GPL(input_ff_erase);
/*
- * flush_effects - erase all effects owned by a file handle
+ * input_ff_flush - erase all effects owned by a file handle
+ * @dev: input device to erase effect from
+ * @file: purported owner of the effects
+ *
+ * This function erases all force-feedback effects associated with
+ * the given owner from the specified device. Note that @file may be %NULL,
+ * in which case all effects will be erased.
*/
-static int flush_effects(struct input_dev *dev, struct file *file)
+int input_ff_flush(struct input_dev *dev, struct file *file)
{
struct ff_device *ff = dev->ff;
int i;
@@ -255,6 +261,7 @@ static int flush_effects(struct input_dev *dev, struct file *file)
return 0;
}
+EXPORT_SYMBOL_GPL(input_ff_flush);
/**
* input_ff_event() - generic handler for force-feedback events
@@ -343,7 +350,7 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
mutex_init(&ff->mutex);
dev->ff = ff;
- dev->flush = flush_effects;
+ dev->flush = input_ff_flush;
dev->event = input_ff_event;
__set_bit(EV_FF, dev->evbit);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d268fdc23c64..762bfb9487dc 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -933,58 +933,52 @@ int input_set_keycode(struct input_dev *dev,
}
EXPORT_SYMBOL(input_set_keycode);
+bool input_match_device_id(const struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
+ if (id->bustype != dev->id.bustype)
+ return false;
+
+ if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
+ if (id->vendor != dev->id.vendor)
+ return false;
+
+ if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
+ if (id->product != dev->id.product)
+ return false;
+
+ if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
+ if (id->version != dev->id.version)
+ return false;
+
+ if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
+ !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
+ !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
+ !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
+ !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
+ !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
+ !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
+ !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
+ !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
+ !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(input_match_device_id);
+
static const struct input_device_id *input_match_device(struct input_handler *handler,
struct input_dev *dev)
{
const struct input_device_id *id;
for (id = handler->id_table; id->flags || id->driver_info; id++) {
-
- if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
- if (id->bustype != dev->id.bustype)
- continue;
-
- if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
- if (id->vendor != dev->id.vendor)
- continue;
-
- if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
- if (id->product != dev->id.product)
- continue;
-
- if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
- if (id->version != dev->id.version)
- continue;
-
- if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX))
- continue;
-
- if (!bitmap_subset(id->keybit, dev->keybit, KEY_MAX))
- continue;
-
- if (!bitmap_subset(id->relbit, dev->relbit, REL_MAX))
- continue;
-
- if (!bitmap_subset(id->absbit, dev->absbit, ABS_MAX))
- continue;
-
- if (!bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX))
- continue;
-
- if (!bitmap_subset(id->ledbit, dev->ledbit, LED_MAX))
- continue;
-
- if (!bitmap_subset(id->sndbit, dev->sndbit, SND_MAX))
- continue;
-
- if (!bitmap_subset(id->ffbit, dev->ffbit, FF_MAX))
- continue;
-
- if (!bitmap_subset(id->swbit, dev->swbit, SW_MAX))
- continue;
-
- if (!handler->match || handler->match(handler, dev))
+ if (input_match_device_id(dev, id) &&
+ (!handler->match || handler->match(handler, dev))) {
return id;
+ }
}
return NULL;
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 29d677c714d2..7b29a8944039 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -747,6 +747,68 @@ static void joydev_cleanup(struct joydev *joydev)
input_close_device(handle);
}
+/*
+ * These codes are copied from hid-ids.h; unfortunately there is no common
+ * usb_ids/bt_ids.h header.
+ */
+#define USB_VENDOR_ID_SONY 0x054c
+#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
+
+#define USB_VENDOR_ID_THQ 0x20d6
+#define USB_DEVICE_ID_THQ_PS3_UDRAW 0xcb17
+
+#define ACCEL_DEV(vnd, prd) \
+ { \
+ .flags = INPUT_DEVICE_ID_MATCH_VENDOR | \
+ INPUT_DEVICE_ID_MATCH_PRODUCT | \
+ INPUT_DEVICE_ID_MATCH_PROPBIT, \
+ .vendor = (vnd), \
+ .product = (prd), \
+ .propbit = { BIT_MASK(INPUT_PROP_ACCELEROMETER) }, \
+ }
+
+static const struct input_device_id joydev_blacklist[] = {
+ /* Avoid touchpads and touchscreens */
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+ },
+ /* Avoid tablets, digitisers and similar devices */
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_DIGI) },
+ },
+ /* Disable accelerometers on composite devices */
+ ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER),
+ ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
+ ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+ ACCEL_DEV(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW),
+ { /* sentinel */ }
+};
+
+static bool joydev_dev_is_blacklisted(struct input_dev *dev)
+{
+ const struct input_device_id *id;
+
+ for (id = joydev_blacklist; id->flags; id++) {
+ if (input_match_device_id(dev, id)) {
+ dev_dbg(&dev->dev,
+ "joydev: blacklisting '%s'\n", dev->name);
+ return true;
+ }
+ }
+
+ return false;
+}
+
static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
{
DECLARE_BITMAP(jd_scratch, KEY_CNT);
@@ -807,12 +869,8 @@ static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
{
- /* Avoid touchpads and touchscreens */
- if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_TOUCH, dev->keybit))
- return false;
-
- /* Avoid tablets, digitisers and similar devices */
- if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit))
+ /* Disable blacklisted devices */
+ if (joydev_dev_is_blacklisted(dev))
return false;
/* Avoid absolute mice */
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index e37e335e406f..6da607d3b811 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -234,14 +234,7 @@ static irqreturn_t tca8418_irq_handler(int irq, void *dev_id)
static int tca8418_configure(struct tca8418_keypad *keypad_data,
u32 rows, u32 cols)
{
- int reg, error;
-
- /* Write config register, if this fails assume device not present */
- error = tca8418_write_byte(keypad_data, REG_CFG,
- CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
- if (error < 0)
- return -ENODEV;
-
+ int reg, error = 0;
/* Assemble a mask for row and column registers */
reg = ~(~0 << rows);
@@ -257,6 +250,12 @@ static int tca8418_configure(struct tca8418_keypad *keypad_data,
error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8);
error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16);
+ if (error)
+ return error;
+
+ error = tca8418_write_byte(keypad_data, REG_CFG,
+ CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
+
return error;
}
@@ -268,6 +267,7 @@ static int tca8418_keypad_probe(struct i2c_client *client,
struct input_dev *input;
u32 rows = 0, cols = 0;
int error, row_shift, max_keys;
+ u8 reg;
/* Check i2c driver capabilities */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
@@ -301,10 +301,10 @@ static int tca8418_keypad_probe(struct i2c_client *client,
keypad_data->client = client;
keypad_data->row_shift = row_shift;
- /* Initialize the chip or fail if chip isn't present */
- error = tca8418_configure(keypad_data, rows, cols);
- if (error < 0)
- return error;
+ /* Read key lock register, if this fails assume device not present */
+ error = tca8418_read_byte(keypad_data, REG_KEY_LCK_EC, &reg);
+ if (error)
+ return -ENODEV;
/* Configure input device */
input = devm_input_allocate_device(dev);
@@ -340,6 +340,11 @@ static int tca8418_keypad_probe(struct i2c_client *client,
return error;
}
+ /* Initialize the chip */
+ error = tca8418_configure(keypad_data, rows, cols);
+ if (error < 0)
+ return error;
+
error = input_register_device(input);
if (error) {
dev_err(dev, "Unable to register input device, error: %d\n",
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 6cee5adc3b5c..debeeaeb8812 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -403,6 +403,7 @@ static const struct platform_device_id axp_pek_id_match[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(platform, axp_pek_id_match);
static struct platform_driver axp20x_pek_driver = {
.probe = axp20x_pek_probe,
@@ -417,4 +418,3 @@ module_platform_driver(axp20x_pek_driver);
MODULE_DESCRIPTION("axp20x Power Button");
MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:axp20x-pek");
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 6bf82ea8c918..ae473123583b 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
return NULL;
}
- while (buflen > 0) {
+ while (buflen >= sizeof(*union_desc)) {
union_desc = (struct usb_cdc_union_desc *)buf;
+ if (union_desc->bLength > buflen) {
+ dev_err(&intf->dev, "Too large descriptor\n");
+ return NULL;
+ }
+
if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
dev_dbg(&intf->dev, "Found union header\n");
- return union_desc;
+
+ if (union_desc->bLength >= sizeof(*union_desc))
+ return union_desc;
+
+ dev_err(&intf->dev,
+ "Union descriptor to short (%d vs %zd\n)",
+ union_desc->bLength, sizeof(*union_desc));
+ return NULL;
}
buflen -= union_desc->bLength;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 022be0e22eba..443151de90c6 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -98,14 +98,15 @@ static int uinput_request_reserve_slot(struct uinput_device *udev,
uinput_request_alloc_id(udev, request));
}
-static void uinput_request_done(struct uinput_device *udev,
- struct uinput_request *request)
+static void uinput_request_release_slot(struct uinput_device *udev,
+ unsigned int id)
{
/* Mark slot as available */
- udev->requests[request->id] = NULL;
- wake_up(&udev->requests_waitq);
+ spin_lock(&udev->requests_lock);
+ udev->requests[id] = NULL;
+ spin_unlock(&udev->requests_lock);
- complete(&request->done);
+ wake_up(&udev->requests_waitq);
}
static int uinput_request_send(struct uinput_device *udev,
@@ -138,20 +139,22 @@ static int uinput_request_send(struct uinput_device *udev,
static int uinput_request_submit(struct uinput_device *udev,
struct uinput_request *request)
{
- int error;
+ int retval;
- error = uinput_request_reserve_slot(udev, request);
- if (error)
- return error;
+ retval = uinput_request_reserve_slot(udev, request);
+ if (retval)
+ return retval;
- error = uinput_request_send(udev, request);
- if (error) {
- uinput_request_done(udev, request);
- return error;
- }
+ retval = uinput_request_send(udev, request);
+ if (retval)
+ goto out;
wait_for_completion(&request->done);
- return request->retval;
+ retval = request->retval;
+
+ out:
+ uinput_request_release_slot(udev, request->id);
+ return retval;
}
/*
@@ -169,7 +172,7 @@ static void uinput_flush_requests(struct uinput_device *udev)
request = udev->requests[i];
if (request) {
request->retval = -ENODEV;
- uinput_request_done(udev, request);
+ complete(&request->done);
}
}
@@ -230,6 +233,18 @@ static int uinput_dev_erase_effect(struct input_dev *dev, int effect_id)
return uinput_request_submit(udev, &request);
}
+static int uinput_dev_flush(struct input_dev *dev, struct file *file)
+{
+ /*
+ * If we are called with file == NULL that means we are tearing
+ * down the device, and therefore we cannot handle FF erase
+ * requests: either we are handling UI_DEV_DESTROY (and holding
+ * the udev->mutex), or the file descriptor is closed and there is
+ * nobody on the other side anymore.
+ */
+ return file ? input_ff_flush(dev, file) : 0;
+}
+
static void uinput_destroy_device(struct uinput_device *udev)
{
const char *name, *phys;
@@ -297,6 +312,12 @@ static int uinput_create_device(struct uinput_device *udev)
dev->ff->playback = uinput_dev_playback;
dev->ff->set_gain = uinput_dev_set_gain;
dev->ff->set_autocenter = uinput_dev_set_autocenter;
+ /*
+ * The standard input_ff_flush() implementation does
+ * not quite work for uinput as we can't reasonably
+ * handle FF requests during device teardown.
+ */
+ dev->flush = uinput_dev_flush;
}
error = input_register_device(udev->dev);
@@ -939,7 +960,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
}
req->retval = ff_up.retval;
- uinput_request_done(udev, req);
+ complete(&req->done);
goto out;
case UI_END_FF_ERASE:
@@ -955,7 +976,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
}
req->retval = ff_erase.retval;
- uinput_request_done(udev, req);
+ complete(&req->done);
goto out;
}
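The uinput rework above changes who owns a request slot: the submitter reserves it, sends the request, waits on the completion, and then always releases the slot itself, while the ioctl and teardown paths merely complete() the request. A condensed sketch of that ownership pattern (all names and the fixed-size slot table are illustrative, not uinput's actual structures):

#include <linux/completion.h>
#include <linux/spinlock.h>

struct example_req {
	unsigned int id;
	int retval;
	struct completion done;
};

static DEFINE_SPINLOCK(example_lock);
static struct example_req *example_slots[16];

/* Submitter side: the slot stays reserved until *we* are done with it. */
static int example_submit(struct example_req *r)
{
	int ret;

	init_completion(&r->done);

	spin_lock(&example_lock);
	example_slots[r->id] = r;		/* reserve */
	spin_unlock(&example_lock);

	/* ... hand the request to the other side here ... */

	wait_for_completion(&r->done);		/* completer only calls complete() */
	ret = r->retval;

	spin_lock(&example_lock);
	example_slots[r->id] = NULL;		/* release: always done by the submitter */
	spin_unlock(&example_lock);

	return ret;
}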
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index 15b1330606c1..e19eb60b3d2f 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -598,7 +598,7 @@ static int elan_i2c_write_fw_block(struct i2c_client *client,
}
/* Wait for F/W to update one page ROM data. */
- msleep(20);
+ msleep(35);
error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val);
if (error) {
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 5af0b7d200bc..ee5466a374bf 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1709,8 +1709,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
.sensor_pdata = {
.sensor_type = rmi_sensor_touchpad,
.axis_align.flip_y = true,
- /* to prevent cursors jumps: */
- .kernel_tracking = true,
+ .kernel_tracking = false,
.topbuttonpad = topbuttonpad,
},
.f30_data = {
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 32d2762448aa..b3bbad7d2282 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -72,6 +72,9 @@ struct goodix_ts_data {
#define GOODIX_REG_CONFIG_DATA 0x8047
#define GOODIX_REG_ID 0x8140
+#define GOODIX_BUFFER_STATUS_READY BIT(7)
+#define GOODIX_BUFFER_STATUS_TIMEOUT 20
+
#define RESOLUTION_LOC 1
#define MAX_CONTACTS_LOC 5
#define TRIGGER_LOC 6
@@ -195,35 +198,53 @@ static int goodix_get_cfg_len(u16 id)
static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
{
+ unsigned long max_timeout;
int touch_num;
int error;
- error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, data,
- GOODIX_CONTACT_SIZE + 1);
- if (error) {
- dev_err(&ts->client->dev, "I2C transfer error: %d\n", error);
- return error;
- }
+ /*
+ * The 'buffer status' bit, which indicates that the data is valid, is
+ * not set as soon as the interrupt is raised, but slightly after.
+ * This takes around 10 ms to happen, so we poll for 20 ms.
+ */
+ max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT);
+ do {
+ error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR,
+ data, GOODIX_CONTACT_SIZE + 1);
+ if (error) {
+ dev_err(&ts->client->dev, "I2C transfer error: %d\n",
+ error);
+ return error;
+ }
- if (!(data[0] & 0x80))
- return -EAGAIN;
+ if (data[0] & GOODIX_BUFFER_STATUS_READY) {
+ touch_num = data[0] & 0x0f;
+ if (touch_num > ts->max_touch_num)
+ return -EPROTO;
+
+ if (touch_num > 1) {
+ data += 1 + GOODIX_CONTACT_SIZE;
+ error = goodix_i2c_read(ts->client,
+ GOODIX_READ_COOR_ADDR +
+ 1 + GOODIX_CONTACT_SIZE,
+ data,
+ GOODIX_CONTACT_SIZE *
+ (touch_num - 1));
+ if (error)
+ return error;
+ }
+
+ return touch_num;
+ }
- touch_num = data[0] & 0x0f;
- if (touch_num > ts->max_touch_num)
- return -EPROTO;
-
- if (touch_num > 1) {
- data += 1 + GOODIX_CONTACT_SIZE;
- error = goodix_i2c_read(ts->client,
- GOODIX_READ_COOR_ADDR +
- 1 + GOODIX_CONTACT_SIZE,
- data,
- GOODIX_CONTACT_SIZE * (touch_num - 1));
- if (error)
- return error;
- }
+ usleep_range(1000, 2000); /* Poll every 1 - 2 ms */
+ } while (time_before(jiffies, max_timeout));
- return touch_num;
+ /*
+ * The Goodix panel will send spurious interrupts after a
+ * 'finger up' event, which will always cause a timeout.
+ */
+ return 0;
}
static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
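The Goodix hunk above turns a single register read into a bounded poll: keep re-reading until the "buffer status" bit is set, sleeping 1-2 ms between attempts, and give up after roughly 20 ms. A minimal sketch of that jiffies-based poll-with-timeout pattern (the read_status callback and the ready bit are placeholders, not the driver's real I2C accessors):

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int example_wait_ready(int (*read_status)(u8 *status))
{
	unsigned long deadline = jiffies + msecs_to_jiffies(20);
	u8 status;
	int err;

	do {
		err = read_status(&status);
		if (err)
			return err;
		if (status & BIT(7))		/* "buffer ready" bit */
			return 0;
		usleep_range(1000, 2000);	/* poll every 1-2 ms */
	} while (time_before(jiffies, deadline));

	return -ETIMEDOUT;
}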
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index 157fdb4bb2e8..8c6c6178ec12 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -663,12 +663,10 @@ static int stmfts_probe(struct i2c_client *client,
sdata->input->open = stmfts_input_open;
sdata->input->close = stmfts_input_close;
+ input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_Y);
touchscreen_parse_properties(sdata->input, true, &sdata->prop);
- input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0,
- sdata->prop.max_x, 0, 0);
- input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0,
- sdata->prop.max_y, 0, 0);
input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0);
input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 7953381d939a..f1043ae71dcc 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -161,7 +161,7 @@ static void titsc_step_config(struct titsc *ts_dev)
break;
case 5:
config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 |
- ts_dev->bit_xn | ts_dev->bit_yp;
+ STEPCONFIG_XNP | STEPCONFIG_YPN;
break;
case 8:
config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 49bd2ab8c507..f3a21343e636 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -278,7 +278,7 @@ config EXYNOS_IOMMU_DEBUG
config IPMMU_VMSA
bool "Renesas VMSA-compatible IPMMU"
depends on ARM || IOMMU_DMA
- depends on ARCH_RENESAS || COMPILE_TEST
+ depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64)
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
@@ -373,7 +373,8 @@ config MTK_IOMMU_V1
config QCOM_IOMMU
# Note: iommu drivers cannot (yet?) be built as modules
bool "Qualcomm IOMMU Support"
- depends on ARCH_QCOM || COMPILE_TEST
+ depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ depends on HAS_DMA
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 51f8215877f5..8e8874d23717 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2773,14 +2773,16 @@ int __init amd_iommu_init_api(void)
int __init amd_iommu_init_dma_ops(void)
{
- swiotlb = iommu_pass_through ? 1 : 0;
+ swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0;
iommu_detected = 1;
/*
* In case we don't initialize SWIOTLB (actually the common case
- * when AMD IOMMU is enabled), make sure there are global
- * dma_ops set as a fall-back for devices not handled by this
- * driver (for example non-PCI devices).
+ * when AMD IOMMU is enabled and SME is not active), make sure there
+ * are global dma_ops set as a fall-back for devices not handled by
+ * this driver (for example non-PCI devices). When SME is active,
+ * make sure that the swiotlb variable remains set so the global dma_ops
+ * continue to be SWIOTLB.
*/
if (!swiotlb)
dma_ops = &nommu_dma_ops;
@@ -3046,6 +3048,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
mutex_unlock(&domain->api_lock);
domain_flush_tlb_pde(domain);
+ domain_flush_complete(domain);
return unmap_size;
}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 382de42b8359..6fe2d0346073 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -874,7 +874,7 @@ static bool copy_device_table(void)
hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
entry = (((u64) hi) << 32) + lo;
if (last_entry && last_entry != entry) {
- pr_err("IOMMU:%d should use the same dev table as others!/n",
+ pr_err("IOMMU:%d should use the same dev table as others!\n",
iommu->index);
return false;
}
@@ -882,7 +882,7 @@ static bool copy_device_table(void)
old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
if (old_devtb_size != dev_table_size) {
- pr_err("The device table size of IOMMU:%d is not expected!/n",
+ pr_err("The device table size of IOMMU:%d is not expected!\n",
iommu->index);
return false;
}
@@ -890,7 +890,7 @@ static bool copy_device_table(void)
old_devtb_phys = entry & PAGE_MASK;
if (old_devtb_phys >= 0x100000000ULL) {
- pr_err("The address of old device table is above 4G, not trustworthy!/n");
+ pr_err("The address of old device table is above 4G, not trustworthy!\n");
return false;
}
old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
@@ -901,7 +901,7 @@ static bool copy_device_table(void)
old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
get_order(dev_table_size));
if (old_dev_tbl_cpy == NULL) {
- pr_err("Failed to allocate memory for copying old device table!/n");
+ pr_err("Failed to allocate memory for copying old device table!\n");
return false;
}
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index ca5ebaeafd6a..57c920c1372d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -497,7 +497,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
#define dmar_parse_one_rhsa dmar_res_noop
#endif
-static void __init
+static void
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
struct acpi_dmar_hardware_unit *drhd;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index f596fcc32898..25c2c75f5332 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -709,7 +709,7 @@ static const struct dev_pm_ops sysmmu_pm_ops = {
pm_runtime_force_resume)
};
-static const struct of_device_id sysmmu_of_match[] __initconst = {
+static const struct of_device_id sysmmu_of_match[] = {
{ .compatible = "samsung,exynos-sysmmu", },
{ },
};
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index d665d0dc16e8..6961fc393f0b 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -245,7 +245,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
struct io_pgtable_cfg *cfg)
{
- if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
+ if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)
return;
dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index bd515be5b380..16d33ac19db0 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -371,7 +371,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
int ret;
spin_lock_irqsave(&dom->pgtlock, flags);
- ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
+ ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32),
+ size, prot);
spin_unlock_irqrestore(&dom->pgtlock, flags);
return ret;
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index e60e3dba85a0..50947ebb6d17 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -157,10 +157,7 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
err = of_iommu_xlate(info->dev, &iommu_spec);
of_node_put(iommu_spec.np);
- if (err)
- return err;
-
- return info->np == pdev->bus->dev.of_node;
+ return err;
}
const struct iommu_ops *of_iommu_configure(struct device *dev,
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e8d89343d613..e88395605e32 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -107,6 +107,10 @@ struct its_node {
#define ITS_ITT_ALIGN SZ_256
+/* The maximum number of VPEID bits supported by VLPI commands */
+#define ITS_MAX_VPEID_BITS (16)
+#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
+
/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
@@ -308,7 +312,7 @@ static void its_encode_size(struct its_cmd_block *cmd, u8 size)
static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
- its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
+ its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}
static void its_encode_valid(struct its_cmd_block *cmd, int valid)
@@ -318,7 +322,7 @@ static void its_encode_valid(struct its_cmd_block *cmd, int valid)
static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
- its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
+ its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}
static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
@@ -358,7 +362,7 @@ static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
- its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
+ its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}
static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
@@ -1478,9 +1482,9 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
u64 val = its_read_baser(its, baser);
u64 esz = GITS_BASER_ENTRY_SIZE(val);
u64 type = GITS_BASER_TYPE(val);
+ u64 baser_phys, tmp;
u32 alloc_pages;
void *base;
- u64 tmp;
retry_alloc_baser:
alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
@@ -1496,8 +1500,24 @@ retry_alloc_baser:
if (!base)
return -ENOMEM;
+ baser_phys = virt_to_phys(base);
+
+ /* Check if the physical address of the memory is above 48bits */
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
+
+ /* 52bit PA is supported only when PageSize=64K */
+ if (psz != SZ_64K) {
+ pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
+ free_pages((unsigned long)base, order);
+ return -ENXIO;
+ }
+
+ /* Convert 52bit PA to 48bit field */
+ baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
+ }
+
retry_baser:
- val = (virt_to_phys(base) |
+ val = (baser_phys |
(type << GITS_BASER_TYPE_SHIFT) |
((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
@@ -1582,13 +1602,12 @@ retry_baser:
static bool its_parse_indirect_baser(struct its_node *its,
struct its_baser *baser,
- u32 psz, u32 *order)
+ u32 psz, u32 *order, u32 ids)
{
u64 tmp = its_read_baser(its, baser);
u64 type = GITS_BASER_TYPE(tmp);
u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
- u32 ids = its->device_ids;
u32 new_order = *order;
bool indirect = false;
@@ -1680,9 +1699,13 @@ static int its_alloc_tables(struct its_node *its)
continue;
case GITS_BASER_TYPE_DEVICE:
+ indirect = its_parse_indirect_baser(its, baser,
+ psz, &order,
+ its->device_ids);
case GITS_BASER_TYPE_VCPU:
indirect = its_parse_indirect_baser(its, baser,
- psz, &order);
+ psz, &order,
+ ITS_MAX_VPEID_BITS);
break;
}
@@ -2551,7 +2574,7 @@ static struct irq_chip its_vpe_irq_chip = {
static int its_vpe_id_alloc(void)
{
- return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL);
+ return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
}
static void its_vpe_id_free(u16 id)
@@ -2851,7 +2874,7 @@ static int its_init_vpe_domain(void)
return -ENOMEM;
}
- BUG_ON(entries != vpe_proxy.dev->nr_ites);
+ BUG_ON(entries > vpe_proxy.dev->nr_ites);
raw_spin_lock_init(&vpe_proxy.lock);
vpe_proxy.next_victim = 0;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 519149ec9053..b5df99c6f680 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1042,7 +1042,7 @@ static int get_cpu_number(struct device_node *dn)
{
const __be32 *cell;
u64 hwid;
- int i;
+ int cpu;
cell = of_get_property(dn, "reg", NULL);
if (!cell)
@@ -1056,9 +1056,9 @@ static int get_cpu_number(struct device_node *dn)
if (hwid & ~MPIDR_HWID_BITMASK)
return -1;
- for (i = 0; i < num_possible_cpus(); i++)
- if (cpu_logical_map(i) == hwid)
- return i;
+ for_each_possible_cpu(cpu)
+ if (cpu_logical_map(cpu) == hwid)
+ return cpu;
return -1;
}
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 2370e6d9e603..cd0bcc3b7e33 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -173,7 +173,9 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)
{
struct its_cmd_info info = {
.cmd_type = MAP_VLPI,
- .map = map,
+ {
+ .map = map,
+ },
};
/*
@@ -189,7 +191,9 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map)
{
struct its_cmd_info info = {
.cmd_type = GET_VLPI,
- .map = map,
+ {
+ .map = map,
+ },
};
return irq_set_vcpu_affinity(irq, &info);
@@ -205,7 +209,9 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)
{
struct its_cmd_info info = {
.cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI,
- .config = config,
+ {
+ .config = config,
+ },
};
return irq_set_vcpu_affinity(irq, &info);
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 6e52a88bbd9e..c90976d7e53c 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -169,20 +169,19 @@ static void gic_mask_irq(struct irq_data *d)
{
unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
- write_gic_rmask(BIT(intr));
+ write_gic_rmask(intr);
gic_clear_pcpu_masks(intr);
}
static void gic_unmask_irq(struct irq_data *d)
{
- struct cpumask *affinity = irq_data_get_affinity_mask(d);
unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
unsigned int cpu;
- write_gic_smask(BIT(intr));
+ write_gic_smask(intr);
gic_clear_pcpu_masks(intr);
- cpu = cpumask_first_and(affinity, cpu_online_mask);
+ cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}
@@ -420,13 +419,17 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw, unsigned int cpu)
{
int intr = GIC_HWIRQ_TO_SHARED(hw);
+ struct irq_data *data;
unsigned long flags;
+ data = irq_get_irq_data(virq);
+
spin_lock_irqsave(&gic_lock, flags);
write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
gic_clear_pcpu_masks(intr);
set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
+ irq_data_update_effective_affinity(data, cpumask_of(cpu));
spin_unlock_irqrestore(&gic_lock, flags);
return 0;
@@ -645,7 +648,7 @@ static int __init gic_of_init(struct device_node *node,
/* Find the first available CPU vector. */
i = 0;
- reserved = (C_SW0 | C_SW1) >> __fls(C_SW0);
+ reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
i++, &cpu_vec))
reserved |= BIT(cpu_vec);
@@ -684,11 +687,11 @@ static int __init gic_of_init(struct device_node *node,
gicconfig = read_gic_config();
gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
- gic_shared_intrs >>= __fls(GIC_CONFIG_NUMINTERRUPTS);
+ gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
gic_shared_intrs = (gic_shared_intrs + 1) * 8;
gic_vpes = gicconfig & GIC_CONFIG_PVPS;
- gic_vpes >>= __fls(GIC_CONFIG_PVPS);
+ gic_vpes >>= __ffs(GIC_CONFIG_PVPS);
gic_vpes = gic_vpes + 1;
if (cpu_has_veic) {
@@ -767,7 +770,7 @@ static int __init gic_of_init(struct device_node *node,
for (i = 0; i < gic_shared_intrs; i++) {
change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
change_gic_trig(i, GIC_TRIG_LEVEL);
- write_gic_rmask(BIT(i));
+ write_gic_rmask(i);
}
for (i = 0; i < gic_vpes; i++) {
diff --git a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c
index bdbb5c0ff7fe..0c085303a583 100644
--- a/drivers/irqchip/irq-tango.c
+++ b/drivers/irqchip/irq-tango.c
@@ -141,7 +141,7 @@ static void __init tangox_irq_init_chip(struct irq_chip_generic *gc,
for (i = 0; i < 2; i++) {
ct[i].chip.irq_ack = irq_gc_ack_set_bit;
ct[i].chip.irq_mask = irq_gc_mask_disable_reg;
- ct[i].chip.irq_mask_ack = irq_gc_mask_disable_reg_and_ack;
+ ct[i].chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg;
ct[i].chip.irq_set_type = tangox_irq_set_type;
ct[i].chip.name = gc->domain->name;
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 6c44609fd83a..cd2b3c69771a 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -825,7 +825,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
isdn_net_local *lp;
struct ippp_struct *is;
int proto;
- unsigned char protobuf[4];
is = file->private_data;
@@ -839,24 +838,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
if (!lp)
printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
else {
- /*
- * Don't reset huptimer for
- * LCP packets. (Echo requests).
- */
- if (copy_from_user(protobuf, buf, 4))
- return -EFAULT;
- proto = PPP_PROTOCOL(protobuf);
- if (proto != PPP_LCP)
- lp->huptimer = 0;
+ if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
+ unsigned char protobuf[4];
+ /*
+ * Don't reset huptimer for
+ * LCP packets. (Echo requests).
+ */
+ if (copy_from_user(protobuf, buf, 4))
+ return -EFAULT;
+
+ proto = PPP_PROTOCOL(protobuf);
+ if (proto != PPP_LCP)
+ lp->huptimer = 0;
- if (lp->isdn_device < 0 || lp->isdn_channel < 0)
return 0;
+ }
if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
lp->dialstate == 0 &&
(lp->flags & ISDN_NET_CONNECTED)) {
unsigned short hl;
struct sk_buff *skb;
+ unsigned char *cpy_buf;
/*
* we need to reserve enough space in front of
* sk_buff. old call to dev_alloc_skb only reserved
@@ -869,11 +872,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
return count;
}
skb_reserve(skb, hl);
- if (copy_from_user(skb_put(skb, count), buf, count))
+ cpy_buf = skb_put(skb, count);
+ if (copy_from_user(cpy_buf, buf, count))
{
kfree_skb(skb);
return -EFAULT;
}
+
+ /*
+ * Don't reset huptimer for
+ * LCP packets. (Echo requests).
+ */
+ proto = PPP_PROTOCOL(cpy_buf);
+ if (proto != PPP_LCP)
+ lp->huptimer = 0;
+
if (is->debug & 0x40) {
printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
index bbbbe0898233..9a257f969300 100644
--- a/drivers/leds/leds-as3645a.c
+++ b/drivers/leds/leds-as3645a.c
@@ -112,6 +112,10 @@
#define AS_PEAK_mA_TO_REG(a) \
((min_t(u32, AS_PEAK_mA_MAX, a) - 1250) / 250)
+/* LED numbers for Devicetree */
+#define AS_LED_FLASH 0
+#define AS_LED_INDICATOR 1
+
enum as_mode {
AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT,
AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT,
@@ -491,10 +495,29 @@ static int as3645a_parse_node(struct as3645a *flash,
struct device_node *node)
{
struct as3645a_config *cfg = &flash->cfg;
+ struct device_node *child;
const char *name;
int rval;
- flash->flash_node = of_get_child_by_name(node, "flash");
+ for_each_child_of_node(node, child) {
+ u32 id = 0;
+
+ of_property_read_u32(child, "reg", &id);
+
+ switch (id) {
+ case AS_LED_FLASH:
+ flash->flash_node = of_node_get(child);
+ break;
+ case AS_LED_INDICATOR:
+ flash->indicator_node = of_node_get(child);
+ break;
+ default:
+ dev_warn(&flash->client->dev,
+ "unknown LED %u encountered, ignoring\n", id);
+ break;
+ }
+ }
+
if (!flash->flash_node) {
dev_err(&flash->client->dev, "can't find flash node\n");
return -ENODEV;
@@ -534,11 +557,10 @@ static int as3645a_parse_node(struct as3645a *flash,
of_property_read_u32(flash->flash_node, "voltage-reference",
&cfg->voltage_reference);
- of_property_read_u32(flash->flash_node, "peak-current-limit",
+ of_property_read_u32(flash->flash_node, "ams,input-max-microamp",
&cfg->peak);
cfg->peak = AS_PEAK_mA_TO_REG(cfg->peak);
- flash->indicator_node = of_get_child_by_name(node, "indicator");
if (!flash->indicator_node) {
dev_warn(&flash->client->dev,
"can't find indicator node\n");
@@ -721,6 +743,7 @@ static int as3645a_remove(struct i2c_client *client)
as3645a_set_control(flash, AS_MODE_EXT_TORCH, false);
v4l2_flash_release(flash->vf);
+ v4l2_flash_release(flash->vfind);
led_classdev_flash_unregister(&flash->fled);
led_classdev_unregister(&flash->iled_cdev);
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 7d5286b05036..1841d0359bac 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(closure_put);
void __closure_wake_up(struct closure_waitlist *wait_list)
{
struct llist_node *list;
- struct closure *cl;
+ struct closure *cl, *t;
struct llist_node *reverse = NULL;
list = llist_del_all(&wait_list->list);
@@ -73,7 +73,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
reverse = llist_reverse_order(list);
/* Then do the wakeups */
- llist_for_each_entry(cl, reverse, list) {
+ llist_for_each_entry_safe(cl, t, reverse, list) {
closure_set_waiting(cl, 0);
closure_sub(cl, CLOSURE_WAITING + 1);
}
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 24eddbdf2ab4..203144762f36 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -149,5 +149,6 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
+void dm_issue_global_event(void);
#endif
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a55ffd4f5933..96ab46512e1f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2466,6 +2466,7 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
kfree(cipher_api);
return ret;
}
+ kfree(cipher_api);
return 0;
bad_mem:
@@ -2584,6 +2585,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
ti->error = "Invalid feature value for sector_size";
return -EINVAL;
}
+ if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
+ ti->error = "Device size is not multiple of sector_size feature";
+ return -EINVAL;
+ }
cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
} else if (!strcasecmp(opt_string, "iv_large_sectors"))
set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 8756a6850431..e52676fa9832 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -477,9 +477,13 @@ static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_si
* Round up the ptr to an 8-byte boundary.
*/
#define ALIGN_MASK 7
+static inline size_t align_val(size_t val)
+{
+ return (val + ALIGN_MASK) & ~ALIGN_MASK;
+}
static inline void *align_ptr(void *ptr)
{
- return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
+ return (void *)align_val((size_t)ptr);
}
/*
@@ -505,7 +509,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
struct hash_cell *hc;
size_t len, needed = 0;
struct gendisk *disk;
- struct dm_name_list *nl, *old_nl = NULL;
+ struct dm_name_list *orig_nl, *nl, *old_nl = NULL;
uint32_t *event_nr;
down_write(&_hash_lock);
@@ -516,17 +520,15 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
*/
for (i = 0; i < NUM_BUCKETS; i++) {
list_for_each_entry (hc, _name_buckets + i, name_list) {
- needed += sizeof(struct dm_name_list);
- needed += strlen(hc->name) + 1;
- needed += ALIGN_MASK;
- needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK;
+ needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
+ needed += align_val(sizeof(uint32_t));
}
}
/*
* Grab our output buffer.
*/
- nl = get_result_buffer(param, param_size, &len);
+ nl = orig_nl = get_result_buffer(param, param_size, &len);
if (len < needed) {
param->flags |= DM_BUFFER_FULL_FLAG;
goto out;
@@ -549,11 +551,16 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
strcpy(nl->name, hc->name);
old_nl = nl;
- event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1);
+ event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
*event_nr = dm_get_event_nr(hc->md);
nl = align_ptr(event_nr + 1);
}
}
+ /*
+ * If a mismatch happens, security may be compromised due to buffer
+ * overflow, so it's better to crash.
+ */
+ BUG_ON((char *)nl - (char *)orig_nl != needed);
out:
up_write(&_hash_lock);
@@ -1621,7 +1628,8 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
* which has a variable size, is not used by the function processing
* the ioctl.
*/
-#define IOCTL_FLAGS_NO_PARAMS 1
+#define IOCTL_FLAGS_NO_PARAMS 1
+#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2
/*-----------------------------------------------------------------
* Implementation of open/close/ioctl on the special char
@@ -1635,12 +1643,12 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
ioctl_fn fn;
} _ioctls[] = {
{DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
- {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
+ {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all},
{DM_LIST_DEVICES_CMD, 0, list_devices},
- {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
- {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
- {DM_DEV_RENAME_CMD, 0, dev_rename},
+ {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create},
+ {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove},
+ {DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename},
{DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
{DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
{DM_DEV_WAIT_CMD, 0, dev_wait},
@@ -1869,6 +1877,9 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
+ if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT)
+ dm_issue_global_event();
+
/*
* Copy the results back to userland.
*/
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 5bfe285ea9d1..2245d06d2045 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3238,7 +3238,7 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
return DM_MAPIO_REQUEUE;
- mddev->pers->make_request(mddev, bio);
+ md_handle_request(mddev, bio);
return DM_MAPIO_SUBMITTED;
}
@@ -3297,11 +3297,10 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev,
static sector_t rs_get_progress(struct raid_set *rs,
sector_t resync_max_sectors, bool *array_in_sync)
{
- sector_t r, recovery_cp, curr_resync_completed;
+ sector_t r, curr_resync_completed;
struct mddev *mddev = &rs->md;
curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
- recovery_cp = mddev->recovery_cp;
*array_in_sync = false;
if (rs_is_raid0(rs)) {
@@ -3330,9 +3329,11 @@ static sector_t rs_get_progress(struct raid_set *rs,
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
r = curr_resync_completed;
else
- r = recovery_cp;
+ r = mddev->recovery_cp;
- if (r == MaxSector) {
+ if ((r == MaxSector) ||
+ (test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
+ (mddev->curr_resync_completed == resync_max_sectors))) {
/*
* Sync complete.
*/
@@ -3892,7 +3893,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 12, 1},
+ .version = {1, 13, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6e54145969c5..4be85324f44d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -52,6 +52,12 @@ static struct workqueue_struct *deferred_remove_workqueue;
atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
+void dm_issue_global_event(void)
+{
+ atomic_inc(&dm_global_event_nr);
+ wake_up(&dm_global_eventq);
+}
+
/*
* One of these is allocated per bio.
*/
@@ -1865,9 +1871,8 @@ static void event_callback(void *context)
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
atomic_inc(&md->event_nr);
- atomic_inc(&dm_global_event_nr);
wake_up(&md->eventq);
- wake_up(&dm_global_eventq);
+ dm_issue_global_event();
}
/*
@@ -2283,6 +2288,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
}
map = __bind(md, table, &limits);
+ dm_issue_global_event();
out:
mutex_unlock(&md->suspend_lock);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 08fcaebc61bd..0ff1bbf6c90e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -266,6 +266,37 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
* call has finished, the bio has been linked into some internal structure
* and so is visible to ->quiesce(), so we don't need the refcount any more.
*/
+void md_handle_request(struct mddev *mddev, struct bio *bio)
+{
+check_suspended:
+ rcu_read_lock();
+ if (mddev->suspended) {
+ DEFINE_WAIT(__wait);
+ for (;;) {
+ prepare_to_wait(&mddev->sb_wait, &__wait,
+ TASK_UNINTERRUPTIBLE);
+ if (!mddev->suspended)
+ break;
+ rcu_read_unlock();
+ schedule();
+ rcu_read_lock();
+ }
+ finish_wait(&mddev->sb_wait, &__wait);
+ }
+ atomic_inc(&mddev->active_io);
+ rcu_read_unlock();
+
+ if (!mddev->pers->make_request(mddev, bio)) {
+ atomic_dec(&mddev->active_io);
+ wake_up(&mddev->sb_wait);
+ goto check_suspended;
+ }
+
+ if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
+ wake_up(&mddev->sb_wait);
+}
+EXPORT_SYMBOL(md_handle_request);
+
static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
@@ -285,23 +316,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
bio_endio(bio);
return BLK_QC_T_NONE;
}
-check_suspended:
- rcu_read_lock();
- if (mddev->suspended) {
- DEFINE_WAIT(__wait);
- for (;;) {
- prepare_to_wait(&mddev->sb_wait, &__wait,
- TASK_UNINTERRUPTIBLE);
- if (!mddev->suspended)
- break;
- rcu_read_unlock();
- schedule();
- rcu_read_lock();
- }
- finish_wait(&mddev->sb_wait, &__wait);
- }
- atomic_inc(&mddev->active_io);
- rcu_read_unlock();
/*
* save the sectors now since our bio can
@@ -310,20 +324,14 @@ check_suspended:
sectors = bio_sectors(bio);
/* bio could be mergeable after passing to underlayer */
bio->bi_opf &= ~REQ_NOMERGE;
- if (!mddev->pers->make_request(mddev, bio)) {
- atomic_dec(&mddev->active_io);
- wake_up(&mddev->sb_wait);
- goto check_suspended;
- }
+
+ md_handle_request(mddev, bio);
cpu = part_stat_lock();
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
part_stat_unlock();
- if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
- wake_up(&mddev->sb_wait);
-
return BLK_QC_T_NONE;
}
@@ -439,16 +447,22 @@ static void md_submit_flush_data(struct work_struct *ws)
struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct bio *bio = mddev->flush_bio;
+ /*
+ * flush_bio must be reset before calling into md_handle_request to
+ * avoid a deadlock: other bios that already passed the suspend check in
+ * md_handle_request could wait for this flush to finish, while the
+ * md_handle_request call below could in turn wait for those bios
+ * because of the same suspend check.
+ */
+ mddev->flush_bio = NULL;
+ wake_up(&mddev->sb_wait);
+
if (bio->bi_iter.bi_size == 0)
/* an empty barrier - all done */
bio_endio(bio);
else {
bio->bi_opf &= ~REQ_PREFLUSH;
- mddev->pers->make_request(mddev, bio);
+ md_handle_request(mddev, bio);
}
-
- mddev->flush_bio = NULL;
- wake_up(&mddev->sb_wait);
}
void md_flush_request(struct mddev *mddev, struct bio *bio)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 561d22b9a9a8..d8287d3cd1bf 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -692,6 +692,7 @@ extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);
+extern void md_handle_request(struct mddev *mddev, struct bio *bio);
extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4188a4881148..928e24a07133 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -811,6 +811,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
spin_unlock(&head->batch_head->batch_lock);
goto unlock_out;
}
+ /*
+ * We must assign this stripe's batch_head while holding the
+ * batch_lock; otherwise clear_batch_ready of the batch head
+ * stripe could clear this stripe's BATCH_READY bit before
+ * stripe->batch_head has been assigned, which would confuse
+ * clear_batch_ready for this stripe.
+ */
+ sh->batch_head = head->batch_head;
/*
* at this point, head's BATCH_READY could be cleared, but we
@@ -818,8 +826,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
*/
list_add(&sh->batch_list, &head->batch_list);
spin_unlock(&head->batch_head->batch_lock);
-
- sh->batch_head = head->batch_head;
} else {
head->batch_head = head;
sh->batch_head = head->batch_head;
@@ -4599,7 +4605,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
(1 << STRIPE_PREREAD_ACTIVE) |
- (1 << STRIPE_DEGRADED)),
+ (1 << STRIPE_DEGRADED) |
+ (1 << STRIPE_ON_UNPLUG_LIST)),
head_sh->state & (1 << STRIPE_INSYNC));
sh->check_state = head_sh->check_state;
@@ -6568,14 +6575,17 @@ static ssize_t
raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
{
struct r5conf *conf;
- unsigned long new;
+ unsigned int new;
int err;
struct r5worker_group *new_groups, *old_groups;
int group_cnt, worker_cnt_per_group;
if (len >= PAGE_SIZE)
return -EINVAL;
- if (kstrtoul(page, 10, &new))
+ if (kstrtouint(page, 10, &new))
+ return -EINVAL;
+ /* 8192 should be big enough */
+ if (new > 8192)
return -EINVAL;
err = mddev_lock(mddev);
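
As a userspace sketch of the bounded parse introduced above (not the sysfs handler itself; the helper name and cap are illustrative), the same effect can be obtained by parsing the value and rejecting anything above the limit in one place:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* parse a base-10 unsigned value and reject anything above a sane cap */
static int parse_group_cnt(const char *page, unsigned int max,
			   unsigned int *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(page, &end, 10);
	if (errno || end == page || val > max)
		return -EINVAL;
	*out = (unsigned int)val;
	return 0;
}

int main(void)
{
	unsigned int new;

	if (parse_group_cnt("8193", 8192, &new) != 0)
		printf("rejected out-of-range worker group count\n");
	if (parse_group_cnt("4", 8192, &new) == 0)
		printf("group_thread_cnt = %u\n", new);
	return 0;
}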
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index eed6c397d840..f8a808d45034 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -1797,12 +1797,19 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
*/
switch (msg->msg[1]) {
case CEC_MSG_GET_CEC_VERSION:
- case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
case CEC_MSG_ABORT:
case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
- case CEC_MSG_GIVE_PHYSICAL_ADDR:
case CEC_MSG_GIVE_OSD_NAME:
+ /*
+ * These messages reply with a directed message, so ignore if
+ * the initiator is Unregistered.
+ */
+ if (!adap->passthrough && from_unregistered)
+ return 0;
+ /* Fall through */
+ case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
case CEC_MSG_GIVE_FEATURES:
+ case CEC_MSG_GIVE_PHYSICAL_ADDR:
/*
* Skip processing these messages if the passthrough mode
* is on.
@@ -1810,7 +1817,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
if (adap->passthrough)
goto skip_processing;
/* Ignore if addressing is wrong */
- if (is_broadcast || from_unregistered)
+ if (is_broadcast)
return 0;
break;
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 2fcba1616168..9139d01ba7ed 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -141,22 +141,39 @@ struct dvb_frontend_private {
static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
void (*release)(struct dvb_frontend *fe));
-static void dvb_frontend_free(struct kref *ref)
+static void __dvb_frontend_free(struct dvb_frontend *fe)
{
- struct dvb_frontend *fe =
- container_of(ref, struct dvb_frontend, refcount);
struct dvb_frontend_private *fepriv = fe->frontend_priv;
+ if (!fepriv)
+ return;
+
dvb_free_device(fepriv->dvbdev);
dvb_frontend_invoke_release(fe, fe->ops.release);
kfree(fepriv);
+ fe->frontend_priv = NULL;
+}
+
+static void dvb_frontend_free(struct kref *ref)
+{
+ struct dvb_frontend *fe =
+ container_of(ref, struct dvb_frontend, refcount);
+
+ __dvb_frontend_free(fe);
}
static void dvb_frontend_put(struct dvb_frontend *fe)
{
- kref_put(&fe->refcount, dvb_frontend_free);
+ /*
+ * Check if the frontend was registered; otherwise the
+ * kref has not been initialized yet.
+ */
+ if (fe->frontend_priv)
+ kref_put(&fe->refcount, dvb_frontend_free);
+ else
+ __dvb_frontend_free(fe);
}
static void dvb_frontend_get(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index 224283fe100a..4d086a7248e9 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -55,29 +55,57 @@ struct dib3000mc_state {
static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg)
{
- u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff };
- u8 rb[2];
struct i2c_msg msg[2] = {
- { .addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2 },
- { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 },
+ { .addr = state->i2c_addr >> 1, .flags = 0, .len = 2 },
+ { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .len = 2 },
};
+ u16 word;
+ u8 *b;
+
+ b = kmalloc(4, GFP_KERNEL);
+ if (!b)
+ return 0;
+
+ b[0] = (reg >> 8) | 0x80;
+ b[1] = reg;
+ b[2] = 0;
+ b[3] = 0;
+
+ msg[0].buf = b;
+ msg[1].buf = b + 2;
if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
dprintk("i2c read error on %d\n",reg);
- return (rb[0] << 8) | rb[1];
+ word = (b[2] << 8) | b[3];
+ kfree(b);
+
+ return word;
}
static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val)
{
- u8 b[4] = {
- (reg >> 8) & 0xff, reg & 0xff,
- (val >> 8) & 0xff, val & 0xff,
- };
struct i2c_msg msg = {
- .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
+ .addr = state->i2c_addr >> 1, .flags = 0, .len = 4
};
- return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+ int rc;
+ u8 *b;
+
+ b = kmalloc(4, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ b[0] = reg >> 8;
+ b[1] = reg;
+ b[2] = val >> 8;
+ b[3] = val;
+
+ msg.buf = b;
+
+ rc = i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+ kfree(b);
+
+ return rc;
}
static int dib3000mc_identify(struct dib3000mc_state *state)
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 7bec3e028bee..5553b89b804e 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -753,13 +753,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
struct i2c_adapter *i2c,
unsigned int pll_desc_id)
{
- u8 b1 [] = { 0 };
- struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD,
- .buf = b1, .len = 1 };
+ u8 *b1;
+ struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .len = 1 };
struct dvb_pll_priv *priv = NULL;
int ret;
const struct dvb_pll_desc *desc;
+ b1 = kmalloc(1, GFP_KERNEL);
+ if (!b1)
+ return NULL;
+
+ b1[0] = 0;
+ msg.buf = b1;
+
if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
(id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
pll_desc_id = id[dvb_pll_devcount];
@@ -773,15 +779,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
fe->ops.i2c_gate_ctrl(fe, 1);
ret = i2c_transfer (i2c, &msg, 1);
- if (ret != 1)
+ if (ret != 1) {
+ kfree(b1);
return NULL;
+ }
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
- if (priv == NULL)
+ if (!priv) {
+ kfree(b1);
return NULL;
+ }
priv->pll_i2c_address = pll_addr;
priv->i2c = i2c;
@@ -811,6 +821,8 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
"insmod option" : "autodetected");
}
+ kfree(b1);
+
return fe;
}
EXPORT_SYMBOL(dvb_pll_attach);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 7e7cc49b8674..3c4f7fa7b9d8 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -112,7 +112,7 @@ config VIDEO_PXA27x
config VIDEO_QCOM_CAMSS
tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
select VIDEOBUF2_DMA_SG
select V4L2_FWNODE
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
index b21b3c2dc77f..b22d2dfcd3c2 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
@@ -2660,7 +2660,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
*
* Return -EINVAL or zero on success
*/
-int vfe_set_selection(struct v4l2_subdev *sd,
+static int vfe_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 68933d208063..9b2a401a4891 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -682,6 +682,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
hfi_session_abort(inst);
load_scale_clocks(core);
+ INIT_LIST_HEAD(&inst->registeredbufs);
}
venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
diff --git a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
index 1edf667d562a..146ae6f25cdb 100644
--- a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
+++ b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
@@ -172,7 +172,8 @@ u32 s5p_cec_get_status(struct s5p_cec_dev *cec)
{
u32 status = 0;
- status = readb(cec->reg + S5P_CEC_STATUS_0);
+ status = readb(cec->reg + S5P_CEC_STATUS_0) & 0xf;
+ status |= (readb(cec->reg + S5P_CEC_TX_STAT1) & 0xf) << 4;
status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8;
status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16;
status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24;
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
index 58d200e7c838..8837e2678bde 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.c
+++ b/drivers/media/platform/s5p-cec/s5p_cec.c
@@ -92,7 +92,10 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
dev_dbg(cec->dev, "irq received\n");
if (status & CEC_STATUS_TX_DONE) {
- if (status & CEC_STATUS_TX_ERROR) {
+ if (status & CEC_STATUS_TX_NACK) {
+ dev_dbg(cec->dev, "CEC_STATUS_TX_NACK set\n");
+ cec->tx = STATE_NACK;
+ } else if (status & CEC_STATUS_TX_ERROR) {
dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n");
cec->tx = STATE_ERROR;
} else {
@@ -135,6 +138,12 @@ static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv)
cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
cec->tx = STATE_IDLE;
break;
+ case STATE_NACK:
+ cec_transmit_done(cec->adap,
+ CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_NACK,
+ 0, 1, 0, 0);
+ cec->tx = STATE_IDLE;
+ break;
case STATE_ERROR:
cec_transmit_done(cec->adap,
CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR,
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.h b/drivers/media/platform/s5p-cec/s5p_cec.h
index 8bcd8dc1aeb9..86ded522ef27 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.h
+++ b/drivers/media/platform/s5p-cec/s5p_cec.h
@@ -35,6 +35,7 @@
#define CEC_STATUS_TX_TRANSFERRING (1 << 1)
#define CEC_STATUS_TX_DONE (1 << 2)
#define CEC_STATUS_TX_ERROR (1 << 3)
+#define CEC_STATUS_TX_NACK (1 << 4)
#define CEC_STATUS_TX_BYTES (0xFF << 8)
#define CEC_STATUS_RX_RUNNING (1 << 16)
#define CEC_STATUS_RX_RECEIVING (1 << 17)
@@ -55,6 +56,7 @@ enum cec_state {
STATE_IDLE,
STATE_BUSY,
STATE_DONE,
+ STATE_NACK,
STATE_ERROR
};
diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
index ed43a4212479..129b558acc92 100644
--- a/drivers/media/rc/ir-sharp-decoder.c
+++ b/drivers/media/rc/ir-sharp-decoder.c
@@ -245,5 +245,5 @@ module_init(ir_sharp_decode_init);
module_exit(ir_sharp_decode_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>");
+MODULE_AUTHOR("James Hogan <jhogan@kernel.org>");
MODULE_DESCRIPTION("Sharp IR protocol decoder");
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 2e487f9a2cc3..4983eeb39f36 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -38,41 +38,74 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val)
{
struct i2c_msg msg[2] = {
- { .addr = priv->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 },
- { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val, .len = 1 },
+ { .addr = priv->cfg->i2c_address, .flags = 0, .len = 1 },
+ { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .len = 1 },
};
+ int rc = 0;
+ u8 *b;
+
+ b = kmalloc(2, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ b[0] = reg;
+ b[1] = 0;
+
+ msg[0].buf = b;
+ msg[1].buf = b + 1;
if (i2c_transfer(priv->i2c, msg, 2) != 2) {
printk(KERN_WARNING "mt2060 I2C read failed\n");
- return -EREMOTEIO;
+ rc = -EREMOTEIO;
}
- return 0;
+ *val = b[1];
+ kfree(b);
+
+ return rc;
}
// Writes a single register
static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val)
{
- u8 buf[2] = { reg, val };
struct i2c_msg msg = {
- .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2
+ .addr = priv->cfg->i2c_address, .flags = 0, .len = 2
};
+ u8 *buf;
+ int rc = 0;
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = reg;
+ buf[1] = val;
+
+ msg.buf = buf;
if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
printk(KERN_WARNING "mt2060 I2C write failed\n");
- return -EREMOTEIO;
+ rc = -EREMOTEIO;
}
- return 0;
+ kfree(buf);
+ return rc;
}
// Writes a set of consecutive registers
static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
{
int rem, val_len;
- u8 xfer_buf[16];
+ u8 *xfer_buf;
+ int rc = 0;
struct i2c_msg msg = {
- .addr = priv->cfg->i2c_address, .flags = 0, .buf = xfer_buf
+ .addr = priv->cfg->i2c_address, .flags = 0
};
+ xfer_buf = kmalloc(16, GFP_KERNEL);
+ if (!xfer_buf)
+ return -ENOMEM;
+
+ msg.buf = xfer_buf;
+
for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) {
val_len = min_t(int, rem, priv->i2c_max_regs);
msg.len = 1 + val_len;
@@ -81,11 +114,13 @@ static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len);
- return -EREMOTEIO;
+ rc = -EREMOTEIO;
+ break;
}
}
- return 0;
+ kfree(xfer_buf);
+ return rc;
}
// Initialisation sequences
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
index 5dba23ca2e5f..dc9bc1807fdf 100644
--- a/drivers/misc/cxl/cxllib.c
+++ b/drivers/misc/cxl/cxllib.c
@@ -219,8 +219,17 @@ int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
down_read(&mm->mmap_sem);
- for (dar = addr; dar < addr + size; dar += page_size) {
- if (!vma || dar < vma->vm_start || dar > vma->vm_end) {
+ vma = find_vma(mm, addr);
+ if (!vma) {
+ pr_err("Can't find vma for addr %016llx\n", addr);
+ rc = -EFAULT;
+ goto out;
+ }
+ /* get the size of the pages allocated */
+ page_size = vma_kernel_pagesize(vma);
+
+ for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) {
+ if (dar < vma->vm_start || dar >= vma->vm_end) {
vma = find_vma(mm, addr);
if (!vma) {
pr_err("Can't find vma for addr %016llx\n", addr);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index c8307e8b4c16..0ccccbaf530d 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -127,6 +127,8 @@
#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
+#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
+
#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 4ff40d319676..78b3172c8e6e 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -93,6 +93,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
+
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
@@ -226,12 +228,15 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
/*
- * For not wake-able HW runtime pm framework
- * can't be used on pci device level.
- * Use domain runtime pm callbacks instead.
- */
- if (!pci_dev_run_wake(pdev))
- mei_me_set_pm_domain(dev);
+ * ME maps runtime suspend/resume to D0i states; hence we need to
+ * bypass the native PCI runtime service, which would eventually
+ * bring the device into the D3cold/hot state, but the mei device
+ * cannot wake up from D3, unlike from D0i3.
+ * To get around the native PCI device runtime pm,
+ * ME uses runtime pm domain handlers, which take precedence
+ * over the driver's pm handlers.
+ */
+ mei_me_set_pm_domain(dev);
if (mei_pg_is_enabled(dev))
pm_runtime_put_noidle(&pdev->dev);
@@ -271,8 +276,7 @@ static void mei_me_shutdown(struct pci_dev *pdev)
dev_dbg(&pdev->dev, "shutdown\n");
mei_stop(dev);
- if (!pci_dev_run_wake(pdev))
- mei_me_unset_pm_domain(dev);
+ mei_me_unset_pm_domain(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
@@ -300,8 +304,7 @@ static void mei_me_remove(struct pci_dev *pdev)
dev_dbg(&pdev->dev, "stop\n");
mei_stop(dev);
- if (!pci_dev_run_wake(pdev))
- mei_me_unset_pm_domain(dev);
+ mei_me_unset_pm_domain(dev);
mei_disable_interrupts(dev);
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index e38a5f144373..0566f9bfa7de 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -144,12 +144,14 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
/*
- * For not wake-able HW runtime pm framework
- * can't be used on pci device level.
- * Use domain runtime pm callbacks instead.
- */
- if (!pci_dev_run_wake(pdev))
- mei_txe_set_pm_domain(dev);
+ * TXE maps runtime suspend/resume to its own power gating states;
+ * hence we need to bypass the native PCI runtime service, which
+ * would eventually bring the device into the D3cold/hot state.
+ * But the TXE device cannot wake up from D3, unlike from its own
+ * power gating. To get around native PCI device runtime pm,
+ * TXE uses runtime pm domain handlers, which take precedence.
+ */
+ mei_txe_set_pm_domain(dev);
pm_runtime_put_noidle(&pdev->dev);
@@ -186,8 +188,7 @@ static void mei_txe_shutdown(struct pci_dev *pdev)
dev_dbg(&pdev->dev, "shutdown\n");
mei_stop(dev);
- if (!pci_dev_run_wake(pdev))
- mei_txe_unset_pm_domain(dev);
+ mei_txe_unset_pm_domain(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
@@ -215,8 +216,7 @@ static void mei_txe_remove(struct pci_dev *pdev)
mei_stop(dev);
- if (!pci_dev_run_wake(pdev))
- mei_txe_unset_pm_domain(dev);
+ mei_txe_unset_pm_domain(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
@@ -318,15 +318,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
else
ret = -EAGAIN;
- /*
- * If everything is okay we're about to enter PCI low
- * power state (D3) therefor we need to disable the
- * interrupts towards host.
- * However if device is not wakeable we do not enter
- * D-low state and we need to keep the interrupt kicking
- */
- if (!ret && pci_dev_run_wake(pdev))
- mei_disable_interrupts(dev);
+ /* keep the irq on, we are staying in D0 */
dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 29fc1e662891..2ad7b5c69156 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1634,8 +1634,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
}
mqrq->areq.mrq = &brq->mrq;
-
- mmc_queue_bounce_pre(mqrq);
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1829,7 +1827,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
brq = &mq_rq->brq;
old_req = mmc_queue_req_to_req(mq_rq);
type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
- mmc_queue_bounce_post(mq_rq);
switch (status) {
case MMC_BLK_SUCCESS:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index a7eb623f8daa..36217ad5e9b1 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1286,6 +1286,23 @@ out_err:
return err;
}
+static void mmc_select_driver_type(struct mmc_card *card)
+{
+ int card_drv_type, drive_strength, drv_type;
+
+ card_drv_type = card->ext_csd.raw_driver_strength |
+ mmc_driver_type_mask(0);
+
+ drive_strength = mmc_select_drive_strength(card,
+ card->ext_csd.hs200_max_dtr,
+ card_drv_type, &drv_type);
+
+ card->drive_strength = drive_strength;
+
+ if (drv_type)
+ mmc_set_driver_type(card->host, drv_type);
+}
+
static int mmc_select_hs400es(struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
}
+ mmc_select_driver_type(card);
+
/* Switch card to HS400 */
val = EXT_CSD_TIMING_HS400 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
@@ -1374,23 +1393,6 @@ out_err:
return err;
}
-static void mmc_select_driver_type(struct mmc_card *card)
-{
- int card_drv_type, drive_strength, drv_type;
-
- card_drv_type = card->ext_csd.raw_driver_strength |
- mmc_driver_type_mask(0);
-
- drive_strength = mmc_select_drive_strength(card,
- card->ext_csd.hs200_max_dtr,
- card_drv_type, &drv_type);
-
- card->drive_strength = drive_strength;
-
- if (drv_type)
- mmc_set_driver_type(card->host, drv_type);
-}
-
/*
* For device supporting HS200 mode, the following sequence
* should be done before executing the tuning process.
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index affa7370ba82..0a4e77a5ba33 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -23,8 +23,6 @@
#include "core.h"
#include "card.h"
-#define MMC_QUEUE_BOUNCESZ 65536
-
/*
* Prepare a MMC request. This just filters out odd stuff.
*/
@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
-{
- unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
-
- if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
- return 0;
-
- if (bouncesz > host->max_req_size)
- bouncesz = host->max_req_size;
- if (bouncesz > host->max_seg_size)
- bouncesz = host->max_seg_size;
- if (bouncesz > host->max_blk_count * 512)
- bouncesz = host->max_blk_count * 512;
-
- if (bouncesz <= 512)
- return 0;
-
- return bouncesz;
-}
-
/**
* mmc_init_request() - initialize the MMC-specific per-request data
* @q: the request queue
@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req,
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
- if (card->bouncesz) {
- mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
- if (!mq_rq->bounce_buf)
- return -ENOMEM;
- if (card->bouncesz > 512) {
- mq_rq->sg = mmc_alloc_sg(1, gfp);
- if (!mq_rq->sg)
- return -ENOMEM;
- mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
- gfp);
- if (!mq_rq->bounce_sg)
- return -ENOMEM;
- }
- } else {
- mq_rq->bounce_buf = NULL;
- mq_rq->bounce_sg = NULL;
- mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
- if (!mq_rq->sg)
- return -ENOMEM;
- }
+ mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+ if (!mq_rq->sg)
+ return -ENOMEM;
return 0;
}
@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
- /* It is OK to kfree(NULL) so this will be smooth */
- kfree(mq_rq->bounce_sg);
- mq_rq->bounce_sg = NULL;
-
- kfree(mq_rq->bounce_buf);
- mq_rq->bounce_buf = NULL;
-
kfree(mq_rq->sg);
mq_rq->sg = NULL;
}
@@ -265,18 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (mmc_can_erase(card))
mmc_queue_setup_discard(mq->queue, card);
- card->bouncesz = mmc_queue_calc_bouncesz(host);
- if (card->bouncesz) {
- blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
- blk_queue_max_segments(mq->queue, card->bouncesz / 512);
- blk_queue_max_segment_size(mq->queue, card->bouncesz);
- } else {
- blk_queue_bounce_limit(mq->queue, limit);
- blk_queue_max_hw_sectors(mq->queue,
- min(host->max_blk_count, host->max_req_size / 512));
- blk_queue_max_segments(mq->queue, host->max_segs);
- blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- }
+ blk_queue_bounce_limit(mq->queue, limit);
+ blk_queue_max_hw_sectors(mq->queue,
+ min(host->max_blk_count, host->max_req_size / 512));
+ blk_queue_max_segments(mq->queue, host->max_segs);
+ blk_queue_max_segment_size(mq->queue, host->max_seg_size);
sema_init(&mq->thread_sem, 1);
@@ -365,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq)
*/
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
- unsigned int sg_len;
- size_t buflen;
- struct scatterlist *sg;
struct request *req = mmc_queue_req_to_req(mqrq);
- int i;
-
- if (!mqrq->bounce_buf)
- return blk_rq_map_sg(mq->queue, req, mqrq->sg);
-
- sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
-
- mqrq->bounce_sg_len = sg_len;
-
- buflen = 0;
- for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
- buflen += sg->length;
-
- sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
-
- return 1;
-}
-
-/*
- * If writing, bounce the data to the buffer before the request
- * is sent to the host driver
- */
-void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
-{
- if (!mqrq->bounce_buf)
- return;
-
- if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
- return;
-
- sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
- mqrq->bounce_buf, mqrq->sg[0].length);
-}
-
-/*
- * If reading, bounce the data from the buffer after the request
- * has been handled by the host driver
- */
-void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
-{
- if (!mqrq->bounce_buf)
- return;
-
- if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
- return;
- sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
- mqrq->bounce_buf, mqrq->sg[0].length);
+ return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 04fc89360a7a..f18d3f656baa 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -49,9 +49,6 @@ enum mmc_drv_op {
struct mmc_queue_req {
struct mmc_blk_request brq;
struct scatterlist *sg;
- char *bounce_buf;
- struct scatterlist *bounce_sg;
- unsigned int bounce_sg_len;
struct mmc_async_req areq;
enum mmc_drv_op drv_op;
int drv_op_result;
@@ -81,11 +78,8 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
-
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *);
-extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
-extern void mmc_queue_bounce_post(struct mmc_queue_req *);
extern int mmc_access_rpmb(struct mmc_queue *);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 02179ed2a40d..8c15637178ff 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -5,7 +5,7 @@
comment "MMC/SD/SDIO Host Controller Drivers"
config MMC_DEBUG
- bool "MMC host drivers debugginG"
+ bool "MMC host drivers debugging"
depends on MMC != n
help
This is an option for use by developers; most people should
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index b9cc95998799..eee08d81b242 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -7,6 +7,7 @@
*
* Copyright (C) 2016 Cavium Inc.
*/
+#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
@@ -149,8 +150,11 @@ error:
for (i = 0; i < CAVIUM_MAX_MMC; i++) {
if (host->slot[i])
cvm_mmc_of_slot_remove(host->slot[i]);
- if (host->slot_pdev[i])
+ if (host->slot_pdev[i]) {
+ get_device(&host->slot_pdev[i]->dev);
of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
+ put_device(&host->slot_pdev[i]->dev);
+ }
}
clk_disable_unprepare(host->clk);
return ret;
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index 27fb625cbcf3..fbd29f00fca0 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -1038,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
*/
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
- MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF;
+ MMC_CAP_3_3V_DDR;
if (host->use_sg)
mmc->max_segs = 16;
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index c885c2d4b904..85745ef179e2 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -531,8 +531,7 @@ static int meson_mmc_clk_init(struct meson_host *host)
div->shift = __ffs(CLK_DIV_MASK);
div->width = __builtin_popcountl(CLK_DIV_MASK);
div->hw.init = &init;
- div->flags = (CLK_DIVIDER_ONE_BASED |
- CLK_DIVIDER_ROUND_CLOSEST);
+ div->flags = CLK_DIVIDER_ONE_BASED;
clk = devm_clk_register(host->dev, &div->hw);
if (WARN_ON(IS_ERR(clk)))
@@ -717,6 +716,22 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct meson_host *host = mmc_priv(mmc);
+ int ret;
+
+ /*
+ * If this is the initial tuning, try to get a sane Rx starting
+ * phase before doing the actual tuning.
+ */
+ if (!mmc->doing_retune) {
+ ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
+
+ if (ret)
+ return ret;
+ }
+
+ ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
+ if (ret)
+ return ret;
return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
}
@@ -746,6 +761,11 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_UP:
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
+ /* Reset phases */
+ clk_set_phase(host->rx_clk, 0);
+ clk_set_phase(host->tx_clk, 270);
+
break;
case MMC_POWER_ON:
@@ -759,8 +779,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->vqmmc_enabled = true;
}
- /* Reset rx phase */
- clk_set_phase(host->rx_clk, 0);
break;
}
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 59ab194cb009..c763b404510f 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev)
pxamci_init_ocr(host);
- /*
- * This architecture used to disable bounce buffers through its
- * defconfig, now it is done at runtime as a host property.
- */
- mmc->caps = MMC_CAP_NO_BOUNCE_BUFF;
+ mmc->caps = 0;
host->cmdat = 0;
if (!cpu_is_pxa25x()) {
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index bbaddf18a1b3..67d787fa3306 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -392,6 +392,7 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
enum {
INTEL_DSM_FNS = 0,
+ INTEL_DSM_V18_SWITCH = 3,
INTEL_DSM_DRV_STRENGTH = 9,
INTEL_DSM_D3_RETUNE = 10,
};
@@ -447,6 +448,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
int err;
u32 val;
+ intel_host->d3_retune = true;
+
err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
if (err) {
pr_debug("%s: DSM not supported, error %d\n",
@@ -557,6 +560,19 @@ static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
sdhci_writel(host, val, INTEL_HS400_ES_REG);
}
+static void sdhci_intel_voltage_switch(struct sdhci_host *host)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct intel_host *intel_host = sdhci_pci_priv(slot);
+ struct device *dev = &slot->chip->pdev->dev;
+ u32 result = 0;
+ int err;
+
+ err = intel_dsm(intel_host, dev, INTEL_DSM_V18_SWITCH, &result);
+ pr_debug("%s: %s DSM error %d result %u\n",
+ mmc_hostname(host->mmc), __func__, err, result);
+}
+
static const struct sdhci_ops sdhci_intel_byt_ops = {
.set_clock = sdhci_set_clock,
.set_power = sdhci_intel_set_power,
@@ -565,6 +581,7 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.hw_reset = sdhci_pci_hw_reset,
+ .voltage_switch = sdhci_intel_voltage_switch,
};
static void byt_read_dsm(struct sdhci_pci_slot *slot)
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 2eec2e652c53..0842bbc2d7ad 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -466,6 +466,7 @@ static int xenon_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
+ struct xenon_priv *priv;
int err;
host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata,
@@ -474,6 +475,7 @@ static int xenon_probe(struct platform_device *pdev)
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
/*
* Link Xenon specific mmc_host_ops function,
@@ -491,9 +493,20 @@ static int xenon_probe(struct platform_device *pdev)
if (err)
goto free_pltfm;
+ priv->axi_clk = devm_clk_get(&pdev->dev, "axi");
+ if (IS_ERR(priv->axi_clk)) {
+ err = PTR_ERR(priv->axi_clk);
+ if (err == -EPROBE_DEFER)
+ goto err_clk;
+ } else {
+ err = clk_prepare_enable(priv->axi_clk);
+ if (err)
+ goto err_clk;
+ }
+
err = mmc_of_parse(host->mmc);
if (err)
- goto err_clk;
+ goto err_clk_axi;
sdhci_get_of_property(pdev);
@@ -502,11 +515,11 @@ static int xenon_probe(struct platform_device *pdev)
/* Xenon specific dt parse */
err = xenon_probe_dt(pdev);
if (err)
- goto err_clk;
+ goto err_clk_axi;
err = xenon_sdhc_prepare(host);
if (err)
- goto err_clk;
+ goto err_clk_axi;
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
@@ -527,6 +540,8 @@ remove_sdhc:
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
xenon_sdhc_unprepare(host);
+err_clk_axi:
+ clk_disable_unprepare(priv->axi_clk);
err_clk:
clk_disable_unprepare(pltfm_host->clk);
free_pltfm:
@@ -538,6 +553,7 @@ static int xenon_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -546,7 +562,7 @@ static int xenon_remove(struct platform_device *pdev)
sdhci_remove_host(host, 0);
xenon_sdhc_unprepare(host);
-
+ clk_disable_unprepare(priv->axi_clk);
clk_disable_unprepare(pltfm_host->clk);
sdhci_pltfm_free(pdev);
diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h
index 2bc0510c0769..9994995c7c56 100644
--- a/drivers/mmc/host/sdhci-xenon.h
+++ b/drivers/mmc/host/sdhci-xenon.h
@@ -83,6 +83,7 @@ struct xenon_priv {
unsigned char bus_width;
unsigned char timing;
unsigned int clock;
+ struct clk *axi_clk;
int phy_type;
/*
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 12cf8288d663..a7293e186e03 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -129,50 +129,6 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
#define CMDREQ_TIMEOUT 5000
-#ifdef CONFIG_MMC_DEBUG
-
-#define STATUS_TO_TEXT(a, status, i) \
- do { \
- if ((status) & TMIO_STAT_##a) { \
- if ((i)++) \
- printk(KERN_DEBUG " | "); \
- printk(KERN_DEBUG #a); \
- } \
- } while (0)
-
-static void pr_debug_status(u32 status)
-{
- int i = 0;
-
- pr_debug("status: %08x = ", status);
- STATUS_TO_TEXT(CARD_REMOVE, status, i);
- STATUS_TO_TEXT(CARD_INSERT, status, i);
- STATUS_TO_TEXT(SIGSTATE, status, i);
- STATUS_TO_TEXT(WRPROTECT, status, i);
- STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
- STATUS_TO_TEXT(CARD_INSERT_A, status, i);
- STATUS_TO_TEXT(SIGSTATE_A, status, i);
- STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
- STATUS_TO_TEXT(STOPBIT_ERR, status, i);
- STATUS_TO_TEXT(ILL_FUNC, status, i);
- STATUS_TO_TEXT(CMD_BUSY, status, i);
- STATUS_TO_TEXT(CMDRESPEND, status, i);
- STATUS_TO_TEXT(DATAEND, status, i);
- STATUS_TO_TEXT(CRCFAIL, status, i);
- STATUS_TO_TEXT(DATATIMEOUT, status, i);
- STATUS_TO_TEXT(CMDTIMEOUT, status, i);
- STATUS_TO_TEXT(RXOVERFLOW, status, i);
- STATUS_TO_TEXT(TXUNDERRUN, status, i);
- STATUS_TO_TEXT(RXRDY, status, i);
- STATUS_TO_TEXT(TXRQ, status, i);
- STATUS_TO_TEXT(ILL_ACCESS, status, i);
- printk("\n");
-}
-
-#else
-#define pr_debug_status(s) do { } while (0)
-#endif
-
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
@@ -762,9 +718,6 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;
- pr_debug_status(status);
- pr_debug_status(ireg);
-
/* Clear the status except the interrupt status */
sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 5736b0c90b33..a308e707392d 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -581,6 +581,14 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
slave->mtd.erasesize = parent->erasesize;
}
+ /*
+ * Slave erasesize might differ from the master one if the master
+ * exposes several regions with different erasesize. Adjust
+ * wr_alignment accordingly.
+ */
+ if (!(slave->mtd.flags & MTD_NO_ERASE))
+ wr_alignment = slave->mtd.erasesize;
+
tmp = slave->offset;
remainder = do_div(tmp, wr_alignment);
if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
index 146af8218314..8268636675ef 100644
--- a/drivers/mtd/nand/atmel/pmecc.c
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -363,7 +363,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
size += (req->ecc.strength + 1) * sizeof(u16);
/* Reserve space for mu, dmu and delta. */
size = ALIGN(size, sizeof(s32));
- size += (req->ecc.strength + 1) * sizeof(s32);
+ size += (req->ecc.strength + 1) * sizeof(s32) * 3;
user = kzalloc(size, GFP_KERNEL);
if (!user)
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index c3bb358ef01e..5796468db653 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -707,7 +707,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
res = clk_prepare_enable(host->clk);
if (res)
- goto err_exit1;
+ goto err_put_clk;
nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
nand_chip->dev_ready = lpc32xx_nand_device_ready;
@@ -814,6 +814,7 @@ err_exit3:
dma_release_channel(host->dma_chan);
err_exit2:
clk_disable_unprepare(host->clk);
+err_put_clk:
clk_put(host->clk);
err_exit1:
lpc32xx_wp_enable(host);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index bcc8cef1c615..12edaae17d81 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2668,7 +2668,7 @@ static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
- int chipnr, realpage, page, blockmask, column;
+ int chipnr, realpage, page, column;
struct nand_chip *chip = mtd_to_nand(mtd);
uint32_t writelen = ops->len;
@@ -2704,7 +2704,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
realpage = (int)(to >> chip->page_shift);
page = realpage & chip->pagemask;
- blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
/* Invalidate the page cache, when we write to the cached page */
if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index cf1d4a15e10a..19c000722cbc 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -1784,7 +1784,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
* @nor: pointer to a 'struct spi_nor'
* @addr: offset in the SFDP area to start reading data from
* @len: number of bytes to read
- * @buf: buffer where the SFDP data are copied into
+ * @buf: buffer where the SFDP data are copied into (dma-safe memory)
*
* Whatever the actual numbers of bytes for address and dummy cycles are
* for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
@@ -1829,6 +1829,36 @@ read_err:
return ret;
}
+/**
+ * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the SFDP area to start reading data from
+ * @len: number of bytes to read
+ * @buf: buffer where the SFDP data are copied into
+ *
+ * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer, since @buf
+ * is not guaranteed to be dma-safe.
+ *
+ * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
+ * otherwise.
+ */
+static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
+ size_t len, void *buf)
+{
+ void *dma_safe_buf;
+ int ret;
+
+ dma_safe_buf = kmalloc(len, GFP_KERNEL);
+ if (!dma_safe_buf)
+ return -ENOMEM;
+
+ ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
+ memcpy(buf, dma_safe_buf, len);
+ kfree(dma_safe_buf);
+
+ return ret;
+}
+
struct sfdp_parameter_header {
u8 id_lsb;
u8 minor;
@@ -2101,7 +2131,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
bfpt_header->length * sizeof(u32));
addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
memset(&bfpt, 0, sizeof(bfpt));
- err = spi_nor_read_sfdp(nor, addr, len, &bfpt);
+ err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
if (err < 0)
return err;
@@ -2127,6 +2157,15 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
params->size = bfpt.dwords[BFPT_DWORD(2)];
if (params->size & BIT(31)) {
params->size &= ~BIT(31);
+
+ /*
+ * Prevent overflows on params->size. Anyway, a NOR of 2^64
+ * bits is unlikely to exist so this error probably means
+ * the BFPT we are reading is corrupted/wrong.
+ */
+ if (params->size > 63)
+ return -EINVAL;
+
params->size = 1ULL << params->size;
} else {
params->size++;
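
As a side note on the BIT(31) branch above, here is a small sketch (outside the patch; the helper name is illustrative) of why the exponent must be capped before the shift: shifting a 64-bit value by 64 or more is undefined behaviour, so any reported density exponent above 63 is better rejected as a corrupt BFPT than shifted:

#include <stdint.h>
#include <stdio.h>

/* returns 0 and sets *size_bits on success, -1 for an implausible exponent */
static int decode_density_exponent(uint32_t exponent, uint64_t *size_bits)
{
	if (exponent > 63)	/* 1ULL << 64 would be undefined behaviour */
		return -1;
	*size_bits = 1ULL << exponent;
	return 0;
}

int main(void)
{
	uint64_t bits;

	if (decode_density_exponent(30, &bits) == 0)	/* e.g. a 1 Gbit part */
		printf("density: %llu bits\n", (unsigned long long)bits);
	if (decode_density_exponent(64, &bits) != 0)
		printf("rejected corrupt exponent\n");
	return 0;
}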
@@ -2243,7 +2282,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
int i, err;
/* Get the SFDP header. */
- err = spi_nor_read_sfdp(nor, 0, sizeof(header), &header);
+ err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
if (err < 0)
return err;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 13f0f219d8aa..a13a4896a8bd 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -182,22 +182,23 @@
/* FLEXCAN hardware feature flags
*
* Below is some version info we got:
- * SOC Version IP-Version Glitch- [TR]WRN_INT Memory err RTR re-
- * Filter? connected? detection ception in MB
- * MX25 FlexCAN2 03.00.00.00 no no no no
- * MX28 FlexCAN2 03.00.04.00 yes yes no no
- * MX35 FlexCAN2 03.00.00.00 no no no no
- * MX53 FlexCAN2 03.00.00.00 yes no no no
- * MX6s FlexCAN3 10.00.12.00 yes yes no yes
- * VF610 FlexCAN3 ? no yes yes yes?
+ * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
+ * Filter? connected? Passive detection ception in MB
+ * MX25 FlexCAN2 03.00.00.00 no no ? no no
+ * MX28 FlexCAN2 03.00.04.00 yes yes no no no
+ * MX35 FlexCAN2 03.00.00.00 no no ? no no
+ * MX53 FlexCAN2 03.00.00.00 yes no no no no
+ * MX6s FlexCAN3 10.00.12.00 yes yes no no yes
+ * VF610 FlexCAN3 ? no yes ? yes yes?
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
-#define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) /* [TR]WRN_INT not connected */
#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */
#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */
#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
+#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */
/* Structure of the message buffer */
struct flexcan_mb {
@@ -281,14 +282,17 @@ struct flexcan_priv {
};
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
- .quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE,
+ .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE,
};
-static const struct flexcan_devtype_data fsl_imx28_devtype_data;
+static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+};
static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+ FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE,
};
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
@@ -335,6 +339,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
}
#endif
+static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
+
+ flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
+static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
+
+ flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
{
if (!priv->reg_xceiver)
@@ -713,6 +733,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
struct flexcan_regs __iomem *regs = priv->regs;
irqreturn_t handled = IRQ_NONE;
u32 reg_iflag1, reg_esr;
+ enum can_state last_state = priv->can.state;
reg_iflag1 = flexcan_read(&regs->iflag1);
@@ -765,8 +786,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
}
- /* state change interrupt */
- if (reg_esr & FLEXCAN_ESR_ERR_STATE)
+ /* state change interrupt or broken error state quirk fix is enabled */
+ if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
+ (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
flexcan_irq_state(dev, reg_esr);
/* bus error IRQ - handle if bus error reporting is activated */
@@ -774,6 +797,44 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
flexcan_irq_bus_err(dev, reg_esr);
+ /* Availability of the error interrupt across state transitions when
+ * bus error reporting is de-activated and
+ * FLEXCAN_QUIRK_BROKEN_PERR_STATE is enabled:
+ * +--------------------------------------------------------------+
+ * | +----------------------------------------------+ [stopped / |
+ * | | | sleeping] -+
+ * +-+-> active <-> warning <-> passive -> bus off -+
+ * ___________^^^^^^^^^^^^_______________________________
+ * disabled(1) enabled disabled
+ *
+ * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled
+ */
+ if ((last_state != priv->can.state) &&
+ (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
+ switch (priv->can.state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ if (priv->devtype_data->quirks &
+ FLEXCAN_QUIRK_BROKEN_WERR_STATE)
+ flexcan_error_irq_enable(priv);
+ else
+ flexcan_error_irq_disable(priv);
+ break;
+
+ case CAN_STATE_ERROR_WARNING:
+ flexcan_error_irq_enable(priv);
+ break;
+
+ case CAN_STATE_ERROR_PASSIVE:
+ case CAN_STATE_BUS_OFF:
+ flexcan_error_irq_disable(priv);
+ break;
+
+ default:
+ break;
+ }
+ }
+
return handled;
}
@@ -887,7 +948,7 @@ static int flexcan_chip_start(struct net_device *dev)
* on most Flexcan cores, too. Otherwise we don't get
* any error warning or passive interrupts.
*/
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_ERR_STATE ||
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
else
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index be928ce62d32..9fdb0f0bfa06 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
}
cf->can_id = id & ESD_IDMASK;
- cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
+ cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
if (id & ESD_EXTID)
cf->can_id |= CAN_EFF_FLAG;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index afcc1312dbaf..68ac3e88a8ce 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -375,6 +375,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
gs_free_tx_context(txc);
+ atomic_dec(&dev->active_tx_urbs);
+
netif_wake_queue(netdev);
}
@@ -463,14 +465,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
urb->transfer_buffer_length,
urb->transfer_buffer,
urb->transfer_dma);
-
- atomic_dec(&dev->active_tx_urbs);
-
- if (!netif_device_present(netdev))
- return;
-
- if (netif_queue_stopped(netdev))
- netif_wake_queue(netdev);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index dce7fa57eb55..f123ed57630d 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -214,8 +214,14 @@ static int mv88e6060_setup(struct dsa_switch *ds)
static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
{
- /* Use the same MAC Address as FD Pause frames for all ports */
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]);
+ u16 val = addr[0] << 8 | addr[1];
+
+ /* The multicast bit is always transmitted as a zero, so the switch uses
+ * bit 8 for "DiffAddr", where 0 means all ports transmit the same SA.
+ */
+ val &= 0xfeff;
+
+ REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, val);
REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
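
For clarity on the register layout touched above, here is a small sketch (not the driver code; the register semantics are as described in the comment, and the address is hypothetical) of how the first two address bytes are packed and how clearing bit 8 keeps all ports transmitting the same source address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical station MAC address */
	const uint8_t addr[6] = { 0x02, 0x42, 0xac, 0x11, 0x00, 0x01 };
	uint16_t mac01 = (uint16_t)(addr[0] << 8 | addr[1]);

	/*
	 * The multicast bit of addr[0] is always sent as zero, so the switch
	 * reuses bit 8 of this register as "DiffAddr"; clearing it makes all
	 * ports transmit the same source address.
	 */
	mac01 &= 0xfeff;

	printf("GLOBAL_MAC_01 = 0x%04x\n", mac01);
	return 0;
}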
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index c6678aa9b4ef..d74c7335c512 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1100,6 +1100,10 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
};
int i, err;
+ /* DSA and CPU ports have to be members of multiple vlans */
+ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ return 0;
+
if (!vid_begin)
return -EOPNOTSUPP;
@@ -3947,7 +3951,9 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
if (chip->irq > 0) {
if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
+ mutex_lock(&chip->reg_lock);
mv88e6xxx_g1_irq_free(chip);
+ mutex_unlock(&chip->reg_lock);
}
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index b1212debc2e1..967020fb26ee 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -742,8 +742,8 @@ static void ena_get_channels(struct net_device *netdev,
{
struct ena_adapter *adapter = netdev_priv(netdev);
- channels->max_rx = ENA_MAX_NUM_IO_QUEUES;
- channels->max_tx = ENA_MAX_NUM_IO_QUEUES;
+ channels->max_rx = adapter->num_queues;
+ channels->max_tx = adapter->num_queues;
channels->max_other = 0;
channels->max_combined = 0;
channels->rx_count = adapter->num_queues;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index f7dc22f65d9f..c6bd5e24005d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -966,7 +966,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.bad_csum++;
u64_stats_update_end(&rx_ring->syncp);
- netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+ netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX IPv4 header checksum error\n");
return;
}
@@ -979,7 +979,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.bad_csum++;
u64_stats_update_end(&rx_ring->syncp);
- netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+ netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX L4 checksum error\n");
skb->ip_summed = CHECKSUM_NONE;
return;
@@ -3064,7 +3064,8 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
if (ena_dev->mem_bar)
devm_iounmap(&pdev->dev, ena_dev->mem_bar);
- devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+ if (ena_dev->reg_bar)
+ devm_iounmap(&pdev->dev, ena_dev->reg_bar);
release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
pci_release_selected_regions(pdev, release_bars);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 214986436ece..57e796870595 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -22,8 +22,12 @@
#define AQ_CFG_FORCE_LEGACY_INT 0U
-#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U
-#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU
+#define AQ_CFG_INTERRUPT_MODERATION_OFF 0
+#define AQ_CFG_INTERRUPT_MODERATION_ON 1
+#define AQ_CFG_INTERRUPT_MODERATION_AUTO 0xFFFFU
+
+#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2)
+
#define AQ_CFG_IRQ_MASK 0x1FFU
#define AQ_CFG_VECS_MAX 8U
@@ -51,6 +55,10 @@
#define AQ_CFG_SKB_FRAGS_MAX 32U
+/* Number of descriptors that must be available in a ring to resume its queue
+ */
+#define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2)
+
#define AQ_CFG_NAPI_WEIGHT 64U
#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a761e91471df..d5e99b468870 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -56,10 +56,6 @@ aq_ethtool_set_link_ksettings(struct net_device *ndev,
return aq_nic_set_link_ksettings(aq_nic, cmd);
}
-/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */
-static const unsigned int aq_ethtool_stat_queue_lines = 5U;
-static const unsigned int aq_ethtool_stat_queue_chars =
- 5U * ETH_GSTRING_LEN;
static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
"InPackets",
"InUCast",
@@ -83,56 +79,26 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
"InOctetsDma",
"OutOctetsDma",
"InDroppedDma",
- "Queue[0] InPackets",
- "Queue[0] OutPackets",
- "Queue[0] InJumboPackets",
- "Queue[0] InLroPackets",
- "Queue[0] InErrors",
- "Queue[1] InPackets",
- "Queue[1] OutPackets",
- "Queue[1] InJumboPackets",
- "Queue[1] InLroPackets",
- "Queue[1] InErrors",
- "Queue[2] InPackets",
- "Queue[2] OutPackets",
- "Queue[2] InJumboPackets",
- "Queue[2] InLroPackets",
- "Queue[2] InErrors",
- "Queue[3] InPackets",
- "Queue[3] OutPackets",
- "Queue[3] InJumboPackets",
- "Queue[3] InLroPackets",
- "Queue[3] InErrors",
- "Queue[4] InPackets",
- "Queue[4] OutPackets",
- "Queue[4] InJumboPackets",
- "Queue[4] InLroPackets",
- "Queue[4] InErrors",
- "Queue[5] InPackets",
- "Queue[5] OutPackets",
- "Queue[5] InJumboPackets",
- "Queue[5] InLroPackets",
- "Queue[5] InErrors",
- "Queue[6] InPackets",
- "Queue[6] OutPackets",
- "Queue[6] InJumboPackets",
- "Queue[6] InLroPackets",
- "Queue[6] InErrors",
- "Queue[7] InPackets",
- "Queue[7] OutPackets",
- "Queue[7] InJumboPackets",
- "Queue[7] InLroPackets",
- "Queue[7] InErrors",
+};
+
+static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
+ "Queue[%d] InPackets",
+ "Queue[%d] OutPackets",
+ "Queue[%d] Restarts",
+ "Queue[%d] InJumboPackets",
+ "Queue[%d] InLroPackets",
+ "Queue[%d] InErrors",
};
static void aq_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
-/* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */
- BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8);
- memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64));
+ memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
+ ARRAY_SIZE(aq_ethtool_queue_stat_names) *
+ cfg->vecs) * sizeof(u64));
aq_nic_get_stats(aq_nic, data);
}
@@ -154,8 +120,8 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) -
- (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines;
+ drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
+ cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = regs_count;
drvinfo->eedump_len = 0;
@@ -164,14 +130,25 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
static void aq_ethtool_get_strings(struct net_device *ndev,
u32 stringset, u8 *data)
{
+ int i, si;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
-
- if (stringset == ETH_SS_STATS)
- memcpy(data, *aq_ethtool_stat_names,
- sizeof(aq_ethtool_stat_names) -
- (AQ_CFG_VECS_MAX - cfg->vecs) *
- aq_ethtool_stat_queue_chars);
+ u8 *p = data;
+
+ if (stringset == ETH_SS_STATS) {
+ memcpy(p, *aq_ethtool_stat_names,
+ sizeof(aq_ethtool_stat_names));
+ p = p + sizeof(aq_ethtool_stat_names);
+ for (i = 0; i < cfg->vecs; i++) {
+ for (si = 0;
+ si < ARRAY_SIZE(aq_ethtool_queue_stat_names);
+ si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_ethtool_queue_stat_names[si], i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ }
}
static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
@@ -182,9 +159,8 @@ static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
switch (stringset) {
case ETH_SS_STATS:
- ret = ARRAY_SIZE(aq_ethtool_stat_names) -
- (AQ_CFG_VECS_MAX - cfg->vecs) *
- aq_ethtool_stat_queue_lines;
+ ret = ARRAY_SIZE(aq_ethtool_stat_names) +
+ cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
break;
default:
ret = -EOPNOTSUPP;
@@ -245,6 +221,69 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
return err;
}
+int aq_ethtool_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+ if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON ||
+ cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) {
+ coal->rx_coalesce_usecs = cfg->rx_itr;
+ coal->tx_coalesce_usecs = cfg->tx_itr;
+ coal->rx_max_coalesced_frames = 0;
+ coal->tx_max_coalesced_frames = 0;
+ } else {
+ coal->rx_coalesce_usecs = 0;
+ coal->tx_coalesce_usecs = 0;
+ coal->rx_max_coalesced_frames = 1;
+ coal->tx_max_coalesced_frames = 1;
+ }
+ return 0;
+}
+
+int aq_ethtool_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+ /* This is not yet supported
+ */
+ if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce)
+ return -EOPNOTSUPP;
+
+ /* Atlantic only supports timing based coalescing
+ */
+ if (coal->rx_max_coalesced_frames > 1 ||
+ coal->rx_coalesce_usecs_irq ||
+ coal->rx_max_coalesced_frames_irq)
+ return -EOPNOTSUPP;
+
+ if (coal->tx_max_coalesced_frames > 1 ||
+ coal->tx_coalesce_usecs_irq ||
+ coal->tx_max_coalesced_frames_irq)
+ return -EOPNOTSUPP;
+
+ /* We do not support frame counting. Check this
+ */
+ if (!(coal->rx_max_coalesced_frames == !coal->rx_coalesce_usecs))
+ return -EOPNOTSUPP;
+ if (!(coal->tx_max_coalesced_frames == !coal->tx_coalesce_usecs))
+ return -EOPNOTSUPP;
+
+ if (coal->rx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX ||
+ coal->tx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX)
+ return -EINVAL;
+
+ cfg->itr = AQ_CFG_INTERRUPT_MODERATION_ON;
+
+ cfg->rx_itr = coal->rx_coalesce_usecs;
+ cfg->tx_itr = coal->tx_coalesce_usecs;
+
+ return aq_nic_update_interrupt_moderation_settings(aq_nic);
+}
+
const struct ethtool_ops aq_ethtool_ops = {
.get_link = aq_ethtool_get_link,
.get_regs_len = aq_ethtool_get_regs_len,
@@ -259,4 +298,6 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_ethtool_stats = aq_ethtool_stats,
.get_link_ksettings = aq_ethtool_get_link_ksettings,
.set_link_ksettings = aq_ethtool_set_link_ksettings,
+ .get_coalesce = aq_ethtool_get_coalesce,
+ .set_coalesce = aq_ethtool_set_coalesce,
};
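For reference, the new .get_coalesce/.set_coalesce hooks above are reached through the standard ethtool coalescing interface. A minimal user-space sketch using the SIOCETHTOOL ioctl follows; the interface name "eth0" is only an assumption, not something taken from the patch:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface name */
	ifr.ifr_data = (char *)&ec;

	/* Read the current coalescing settings from the driver */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx-usecs=%u tx-usecs=%u\n",
		       ec.rx_coalesce_usecs, ec.tx_coalesce_usecs);

	close(fd);
	return 0;
}

The ethtool utility's -c/-C options wrap this same ioctl, so the usual command-line workflow applies once these hooks are in place.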
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index bf9b3f020e10..0207927dc8a6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -151,8 +151,7 @@ struct aq_hw_ops {
[ETH_ALEN],
u32 count);
- int (*hw_interrupt_moderation_set)(struct aq_hw_s *self,
- bool itr_enabled);
+ int (*hw_interrupt_moderation_set)(struct aq_hw_s *self);
int (*hw_rss_set)(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params);
@@ -163,6 +162,8 @@ struct aq_hw_ops {
int (*hw_get_regs)(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
+ int (*hw_update_stats)(struct aq_hw_s *self);
+
int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
unsigned int *p_count);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 6ac9e2602d6d..483e97691eea 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -16,6 +16,7 @@
#include "aq_pci_func.h"
#include "aq_nic_internal.h"
+#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
@@ -24,6 +25,18 @@
#include <linux/tcp.h>
#include <net/ip.h>
+static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
+module_param_named(aq_itr, aq_itr, uint, 0644);
+MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");
+
+static unsigned int aq_itr_tx;
+module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
+MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");
+
+static unsigned int aq_itr_rx;
+module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
+MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
+
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -61,9 +74,9 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
- cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF;
- cfg->itr = cfg->is_interrupt_moderation ?
- AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U;
+ cfg->itr = aq_itr;
+ cfg->tx_itr = aq_itr_tx;
+ cfg->rx_itr = aq_itr_rx;
cfg->is_rss = AQ_CFG_IS_RSS_DEF;
cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
@@ -119,6 +132,37 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
return 0;
}
+static int aq_nic_update_link_status(struct aq_nic_s *self)
+{
+ int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
+
+ if (err)
+ return err;
+
+ if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
+ pr_info("%s: link change old %d new %d\n",
+ AQ_CFG_DRV_NAME, self->link_status.mbps,
+ self->aq_hw->aq_link_status.mbps);
+ aq_nic_update_interrupt_moderation_settings(self);
+ }
+
+ self->link_status = self->aq_hw->aq_link_status;
+ if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
+ aq_utils_obj_set(&self->header.flags,
+ AQ_NIC_FLAG_STARTED);
+ aq_utils_obj_clear(&self->header.flags,
+ AQ_NIC_LINK_DOWN);
+ netif_carrier_on(self->ndev);
+ netif_tx_wake_all_queues(self->ndev);
+ }
+ if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
+ netif_carrier_off(self->ndev);
+ netif_tx_disable(self->ndev);
+ aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+ }
+ return 0;
+}
+
static void aq_nic_service_timer_cb(unsigned long param)
{
struct aq_nic_s *self = (struct aq_nic_s *)param;
@@ -131,25 +175,12 @@ static void aq_nic_service_timer_cb(unsigned long param)
if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit;
- err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
- if (err < 0)
+ err = aq_nic_update_link_status(self);
+ if (err)
goto err_exit;
- self->link_status = self->aq_hw->aq_link_status;
-
- self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
- self->aq_nic_cfg.is_interrupt_moderation);
-
- if (self->link_status.mbps) {
- aq_utils_obj_set(&self->header.flags,
- AQ_NIC_FLAG_STARTED);
- aq_utils_obj_clear(&self->header.flags,
- AQ_NIC_LINK_DOWN);
- netif_carrier_on(self->ndev);
- } else {
- netif_carrier_off(self->ndev);
- aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
- }
+ if (self->aq_hw_ops.hw_update_stats)
+ self->aq_hw_ops.hw_update_stats(self->aq_hw);
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
@@ -214,7 +245,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
SET_NETDEV_DEV(ndev, dev);
ndev->if_port = port;
- ndev->min_mtu = ETH_MIN_MTU;
self->ndev = ndev;
self->aq_pci_func = aq_pci_func;
@@ -241,7 +271,6 @@ err_exit:
int aq_nic_ndev_register(struct aq_nic_s *self)
{
int err = 0;
- unsigned int i = 0U;
if (!self->ndev) {
err = -EINVAL;
@@ -263,8 +292,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
netif_carrier_off(self->ndev);
- for (i = AQ_CFG_VECS_MAX; i--;)
- aq_nic_ndev_queue_stop(self, i);
+ netif_tx_disable(self->ndev);
err = register_netdev(self->ndev);
if (err < 0)
@@ -283,6 +311,7 @@ int aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->features = aq_hw_caps->hw_features;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
+ self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;
return 0;
}
@@ -318,12 +347,9 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
err = -EINVAL;
goto err_exit;
}
- if (netif_running(ndev)) {
- unsigned int i;
-
- for (i = AQ_CFG_VECS_MAX; i--;)
- netif_stop_subqueue(ndev, i);
- }
+ if (netif_running(ndev))
+ netif_tx_disable(ndev);
+ netif_carrier_off(self->ndev);
for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
self->aq_vecs++) {
@@ -383,16 +409,6 @@ err_exit:
return err;
}
-void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
-{
- netif_start_subqueue(self->ndev, idx);
-}
-
-void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
-{
- netif_stop_subqueue(self->ndev, idx);
-}
-
int aq_nic_start(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
@@ -421,9 +437,8 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
- err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
- self->aq_nic_cfg.is_interrupt_moderation);
- if (err < 0)
+ err = aq_nic_update_interrupt_moderation_settings(self);
+ if (err)
goto err_exit;
setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
(unsigned long)self);
@@ -451,10 +466,6 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_nic_ndev_queue_start(self, i);
-
err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
if (err < 0)
goto err_exit;
@@ -463,6 +474,8 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
+ netif_tx_start_all_queues(self->ndev);
+
err_exit:
return err;
}
@@ -475,6 +488,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int frag_count = 0U;
unsigned int dx = ring->sw_tail;
+ struct aq_ring_buff_s *first = NULL;
struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
if (unlikely(skb_is_gso(skb))) {
@@ -485,6 +499,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
dx_buff->len_l4 = tcp_hdrlen(skb);
dx_buff->mss = skb_shinfo(skb)->gso_size;
dx_buff->is_txc = 1U;
+ dx_buff->eop_index = 0xffffU;
dx_buff->is_ipv6 =
(ip_hdr(skb)->version == 6) ? 1U : 0U;
@@ -504,6 +519,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
goto exit;
+ first = dx_buff;
dx_buff->len_pkt = skb->len;
dx_buff->is_sop = 1U;
dx_buff->is_mapped = 1U;
@@ -532,40 +548,46 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
for (; nr_frags--; ++frag_count) {
unsigned int frag_len = 0U;
+ unsigned int buff_offset = 0U;
+ unsigned int buff_size = 0U;
dma_addr_t frag_pa;
skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
frag_len = skb_frag_size(frag);
- frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
- frag_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa)))
- goto mapping_error;
+ while (frag_len) {
+ if (frag_len > AQ_CFG_TX_FRAME_MAX)
+ buff_size = AQ_CFG_TX_FRAME_MAX;
+ else
+ buff_size = frag_len;
+
+ frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
+ frag,
+ buff_offset,
+ buff_size,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
+ frag_pa)))
+ goto mapping_error;
- while (frag_len > AQ_CFG_TX_FRAME_MAX) {
dx = aq_ring_next_dx(ring, dx);
dx_buff = &ring->buff_ring[dx];
dx_buff->flags = 0U;
- dx_buff->len = AQ_CFG_TX_FRAME_MAX;
+ dx_buff->len = buff_size;
dx_buff->pa = frag_pa;
dx_buff->is_mapped = 1U;
+ dx_buff->eop_index = 0xffffU;
+
+ frag_len -= buff_size;
+ buff_offset += buff_size;
- frag_len -= AQ_CFG_TX_FRAME_MAX;
- frag_pa += AQ_CFG_TX_FRAME_MAX;
++ret;
}
-
- dx = aq_ring_next_dx(ring, dx);
- dx_buff = &ring->buff_ring[dx];
-
- dx_buff->flags = 0U;
- dx_buff->len = frag_len;
- dx_buff->pa = frag_pa;
- dx_buff->is_mapped = 1U;
- ++ret;
}
+ first->eop_index = dx;
dx_buff->is_eop = 1U;
dx_buff->skb = skb;
goto exit;
@@ -602,7 +624,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
unsigned int tc = 0U;
int err = NETDEV_TX_OK;
- bool is_nic_in_bad_state;
frags = skb_shinfo(skb)->nr_frags + 1;
@@ -613,13 +634,10 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
goto err_exit;
}
- is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
- AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
- (aq_ring_avail_dx(ring) <
- AQ_CFG_SKB_FRAGS_MAX);
+ aq_ring_update_queue_state(ring);
- if (is_nic_in_bad_state) {
- aq_nic_ndev_queue_stop(self, ring->idx);
+ /* Above status update may stop the queue. Check this. */
+ if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
err = NETDEV_TX_BUSY;
goto err_exit;
}
@@ -631,9 +649,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
ring,
frags);
if (err >= 0) {
- if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
- aq_nic_ndev_queue_stop(self, ring->idx);
-
++ring->stats.tx.packets;
ring->stats.tx.bytes += skb->len;
}
@@ -645,6 +660,11 @@ err_exit:
return err;
}
+int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
+{
+ return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw);
+}
+
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
int err = 0;
@@ -693,16 +713,9 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
- int err = 0;
-
- if (new_mtu > self->aq_hw_caps.mtu) {
- err = -EINVAL;
- goto err_exit;
- }
self->aq_nic_cfg.mtu = new_mtu;
-err_exit:
- return err;
+ return 0;
}
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
@@ -905,9 +918,8 @@ int aq_nic_stop(struct aq_nic_s *self)
struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_nic_ndev_queue_stop(self, i);
+ netif_tx_disable(self->ndev);
+ netif_carrier_off(self->ndev);
del_timer_sync(&self->service_timer);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 7fc2a5ecb2b7..4309983acdd6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -40,6 +40,8 @@ struct aq_nic_cfg_s {
u32 vecs; /* vecs==allocated irqs */
u32 irq_type;
u32 itr;
+ u16 rx_itr;
+ u16 tx_itr;
u32 num_rss_queues;
u32 mtu;
u32 ucp_0x364;
@@ -49,7 +51,6 @@ struct aq_nic_cfg_s {
u16 is_mc_list_enabled;
u16 mc_list_count;
bool is_autoneg;
- bool is_interrupt_moderation;
bool is_polling;
bool is_rss;
bool is_lro;
@@ -83,8 +84,6 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
int aq_nic_init(struct aq_nic_s *self);
int aq_nic_cfg_start(struct aq_nic_s *self);
int aq_nic_ndev_register(struct aq_nic_s *self);
-void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx);
-void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx);
void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
@@ -106,5 +105,6 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
u32 aq_nic_get_fw_version(struct aq_nic_s *self);
int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
+int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 4c6c882c6a1c..cadaa646c89f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -85,6 +85,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
int err = 0;
unsigned int bar = 0U;
unsigned int port = 0U;
+ unsigned int numvecs = 0U;
err = pci_enable_device(self->pdev);
if (err < 0)
@@ -142,10 +143,12 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
}
}
- /*enable interrupts */
+ numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs);
+ numvecs = min(numvecs, num_online_cpus());
+
+ /* enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
- err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs,
- self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX);
+ err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX);
if (err < 0) {
err = pci_alloc_irq_vectors(self->pdev, 1, 1,
@@ -153,7 +156,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
if (err < 0)
goto err_exit;
}
-#endif
+#endif /* AQ_CFG_FORCE_LEGACY_INT */
/* net device init */
for (port = 0; port < self->ports; ++port) {
@@ -265,6 +268,9 @@ void aq_pci_func_free(struct aq_pci_func_s *self)
aq_nic_ndev_free(self->port[port]);
}
+ if (self->mmio)
+ iounmap(self->mmio);
+
kfree(self);
err_exit:;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 4eee1996a825..0654e0c76bc2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -104,6 +104,38 @@ int aq_ring_init(struct aq_ring_s *self)
return 0;
}
+static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
+ unsigned int t)
+{
+ return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
+}
+
+void aq_ring_update_queue_state(struct aq_ring_s *ring)
+{
+ if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
+ aq_ring_queue_stop(ring);
+ else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
+ aq_ring_queue_wake(ring);
+}
+
+void aq_ring_queue_wake(struct aq_ring_s *ring)
+{
+ struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
+
+ if (__netif_subqueue_stopped(ndev, ring->idx)) {
+ netif_wake_subqueue(ndev, ring->idx);
+ ring->stats.tx.queue_restarts++;
+ }
+}
+
+void aq_ring_queue_stop(struct aq_ring_s *ring)
+{
+ struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
+
+ if (!__netif_subqueue_stopped(ndev, ring->idx))
+ netif_stop_subqueue(ndev, ring->idx);
+}
+
void aq_ring_tx_clean(struct aq_ring_s *self)
{
struct device *dev = aq_nic_get_dev(self->aq_nic);
@@ -113,23 +145,28 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
if (likely(buff->is_mapped)) {
- if (unlikely(buff->is_sop))
+ if (unlikely(buff->is_sop)) {
+ if (!buff->is_eop &&
+ buff->eop_index != 0xffffU &&
+ (!aq_ring_dx_in_range(self->sw_head,
+ buff->eop_index,
+ self->hw_head)))
+ break;
+
dma_unmap_single(dev, buff->pa, buff->len,
DMA_TO_DEVICE);
- else
+ } else {
dma_unmap_page(dev, buff->pa, buff->len,
DMA_TO_DEVICE);
+ }
}
if (unlikely(buff->is_eop))
dev_kfree_skb_any(buff->skb);
- }
-}
-static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i,
- unsigned int t)
-{
- return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
+ buff->pa = 0U;
+ buff->eop_index = 0xffffU;
+ }
}
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
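aq_ring_update_queue_state() above implements a stop/wake hysteresis: the queue is stopped once no more than a worst-case packet's worth of descriptors remains free, and is woken again only after twice that margin is available. A stand-alone model of the pattern, with all names and values invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define FRAGS_MAX	32			/* worst-case descriptors for one packet */
#define RESTART_THRES	(FRAGS_MAX * 2)		/* free space required before waking */

struct txq_model {
	bool stopped;
	unsigned int restarts;
};

/* Stop when one more worst-case packet may not fit; wake only after a
 * comfortably larger margin is free, so the queue does not flap between
 * stopped and running on every completion.
 */
static void txq_update(struct txq_model *q, unsigned int avail_desc)
{
	if (avail_desc <= FRAGS_MAX) {
		q->stopped = true;
	} else if (q->stopped && avail_desc > RESTART_THRES) {
		q->stopped = false;
		q->restarts++;
	}
}

int main(void)
{
	struct txq_model q = { .stopped = false, .restarts = 0 };

	txq_update(&q, 16);	/* ring nearly full -> stop */
	txq_update(&q, 40);	/* below restart threshold -> stay stopped */
	txq_update(&q, 80);	/* enough headroom -> wake */
	printf("stopped=%d restarts=%u\n", q.stopped, q.restarts);
	return 0;
}

The gap between the stop and wake thresholds is the point of the design: waking at the same level the queue was stopped at would bounce the queue on every small burst of completions.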
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 782176c5f4f8..5844078764bd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -65,7 +65,7 @@ struct __packed aq_ring_buff_s {
};
union {
struct {
- u32 len:16;
+ u16 len;
u32 is_ip_cso:1;
u32 is_udp_cso:1;
u32 is_tcp_cso:1;
@@ -77,8 +77,10 @@ struct __packed aq_ring_buff_s {
u32 is_cleaned:1;
u32 is_error:1;
u32 rsvd3:6;
+ u16 eop_index;
+ u16 rsvd4;
};
- u32 flags;
+ u64 flags;
};
};
@@ -94,6 +96,7 @@ struct aq_ring_stats_tx_s {
u64 errors;
u64 packets;
u64 bytes;
+ u64 queue_restarts;
};
union aq_ring_stats_s {
@@ -147,6 +150,9 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
int aq_ring_init(struct aq_ring_s *self);
void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self);
+void aq_ring_update_queue_state(struct aq_ring_s *ring);
+void aq_ring_queue_wake(struct aq_ring_s *ring);
+void aq_ring_queue_stop(struct aq_ring_s *ring);
void aq_ring_tx_clean(struct aq_ring_s *self);
int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index ebf588004c46..5fecc9a099ef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -59,12 +59,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
if (ring[AQ_VEC_TX_ID].sw_head !=
ring[AQ_VEC_TX_ID].hw_head) {
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
-
- if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) >
- AQ_CFG_SKB_FRAGS_MAX) {
- aq_nic_ndev_queue_start(self->aq_nic,
- ring[AQ_VEC_TX_ID].idx);
- }
+ aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
was_tx_cleaned = true;
}
@@ -364,6 +359,7 @@ void aq_vec_add_stats(struct aq_vec_s *self,
stats_tx->packets += tx->packets;
stats_tx->bytes += tx->bytes;
stats_tx->errors += tx->errors;
+ stats_tx->queue_restarts += tx->queue_restarts;
}
}
@@ -377,8 +373,11 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
aq_vec_add_stats(self, &stats_rx, &stats_tx);
+ /* This data should mimic aq_ethtool_queue_stat_names structure
+ */
data[count] += stats_rx.packets;
data[++count] += stats_tx.packets;
+ data[++count] += stats_tx.queue_restarts;
data[++count] += stats_rx.jumbo_packets;
data[++count] += stats_rx.lro_packets;
data[++count] += stats_rx.errors;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index c5a02df7a48b..07b3c49a16a4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -765,24 +765,23 @@ err_exit:
return err;
}
-static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
- bool itr_enabled)
+static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
unsigned int i = 0U;
+ u32 itr_rx;
- if (itr_enabled && self->aq_nic_cfg->itr) {
- if (self->aq_nic_cfg->itr != 0xFFFFU) {
+ if (self->aq_nic_cfg->itr) {
+ if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
u32 itr_ = (self->aq_nic_cfg->itr >> 1);
itr_ = min(AQ_CFG_IRQ_MASK, itr_);
- PHAL_ATLANTIC_A0->itr_rx = 0x80000000U |
- (itr_ << 0x10);
+ itr_rx = 0x80000000U | (itr_ << 0x10);
} else {
u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);
if (n < self->aq_link_status.mbps) {
- PHAL_ATLANTIC_A0->itr_rx = 0U;
+ itr_rx = 0U;
} else {
static unsigned int hw_timers_tbl_[] = {
0x01CU, /* 10Gbit */
@@ -797,8 +796,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
hw_atl_utils_mbps_2_speed_index(
self->aq_link_status.mbps);
- PHAL_ATLANTIC_A0->itr_rx =
- 0x80000000U |
+ itr_rx = 0x80000000U |
(hw_timers_tbl_[speed_index] << 0x10U);
}
@@ -806,11 +804,11 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
}
} else {
- PHAL_ATLANTIC_A0->itr_rx = 0U;
+ itr_rx = 0U;
}
for (i = HW_ATL_A0_RINGS_MAX; i--;)
- reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i);
+ reg_irq_thr_set(self, itr_rx, i);
return aq_hw_err_from_flags(self);
}
@@ -885,6 +883,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
.hw_rss_set = hw_atl_a0_hw_rss_set,
.hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
.hw_get_regs = hw_atl_utils_hw_get_regs,
+ .hw_update_stats = hw_atl_utils_update_stats,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 21784cc39dab..ec68c20efcbd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -788,39 +788,45 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
- bool itr_enabled)
+static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
unsigned int i = 0U;
+ u32 itr_tx = 2U;
+ u32 itr_rx = 2U;
- if (itr_enabled && self->aq_nic_cfg->itr) {
+ switch (self->aq_nic_cfg->itr) {
+ case AQ_CFG_INTERRUPT_MODERATION_ON:
+ case AQ_CFG_INTERRUPT_MODERATION_AUTO:
tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
tdm_tdm_intr_moder_en_set(self, 1U);
rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
rdm_rdm_intr_moder_en_set(self, 1U);
- PHAL_ATLANTIC_B0->itr_tx = 2U;
- PHAL_ATLANTIC_B0->itr_rx = 2U;
+ if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
+ /* HW timers are in 2us units */
+ int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
+ int tx_min_timer = tx_max_timer / 2;
- if (self->aq_nic_cfg->itr != 0xFFFFU) {
- unsigned int max_timer = self->aq_nic_cfg->itr / 2U;
- unsigned int min_timer = self->aq_nic_cfg->itr / 32U;
+ int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
+ int rx_min_timer = rx_max_timer / 2;
- max_timer = min(0x1FFU, max_timer);
- min_timer = min(0xFFU, min_timer);
+ tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
+ tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
+ rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
+ rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);
- PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U;
- PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U;
- PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U;
- PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U;
+ itr_tx |= tx_min_timer << 0x8U;
+ itr_tx |= tx_max_timer << 0x10U;
+ itr_rx |= rx_min_timer << 0x8U;
+ itr_rx |= rx_max_timer << 0x10U;
} else {
static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
- {0xffU, 0xffU}, /* 10Gbit */
- {0xffU, 0x1ffU}, /* 5Gbit */
- {0xffU, 0x1ffU}, /* 5Gbit 5GS */
- {0xffU, 0x1ffU}, /* 2.5Gbit */
- {0xffU, 0x1ffU}, /* 1Gbit */
- {0xffU, 0x1ffU}, /* 100Mbit */
+ {0xfU, 0xffU}, /* 10Gbit */
+ {0xfU, 0x1ffU}, /* 5Gbit */
+ {0xfU, 0x1ffU}, /* 5Gbit 5GS */
+ {0xfU, 0x1ffU}, /* 2.5Gbit */
+ {0xfU, 0x1ffU}, /* 1Gbit */
+ {0xfU, 0x1ffU}, /* 100Mbit */
};
static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
@@ -836,34 +842,36 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
hw_atl_utils_mbps_2_speed_index(
self->aq_link_status.mbps);
- PHAL_ATLANTIC_B0->itr_tx |=
- hw_atl_b0_timers_table_tx_[speed_index]
- [0] << 0x8U; /* set min timer value */
- PHAL_ATLANTIC_B0->itr_tx |=
- hw_atl_b0_timers_table_tx_[speed_index]
- [1] << 0x10U; /* set max timer value */
-
- PHAL_ATLANTIC_B0->itr_rx |=
- hw_atl_b0_timers_table_rx_[speed_index]
- [0] << 0x8U; /* set min timer value */
- PHAL_ATLANTIC_B0->itr_rx |=
- hw_atl_b0_timers_table_rx_[speed_index]
- [1] << 0x10U; /* set max timer value */
+ /* Update user visible ITR settings */
+ self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
+ [speed_index][1] * 2;
+ self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
+ [speed_index][1] * 2;
+
+ itr_tx |= hw_atl_b0_timers_table_tx_
+ [speed_index][0] << 0x8U;
+ itr_tx |= hw_atl_b0_timers_table_tx_
+ [speed_index][1] << 0x10U;
+
+ itr_rx |= hw_atl_b0_timers_table_rx_
+ [speed_index][0] << 0x8U;
+ itr_rx |= hw_atl_b0_timers_table_rx_
+ [speed_index][1] << 0x10U;
}
- } else {
+ break;
+ case AQ_CFG_INTERRUPT_MODERATION_OFF:
tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
tdm_tdm_intr_moder_en_set(self, 0U);
rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
rdm_rdm_intr_moder_en_set(self, 0U);
- PHAL_ATLANTIC_B0->itr_tx = 0U;
- PHAL_ATLANTIC_B0->itr_rx = 0U;
+ itr_tx = 0U;
+ itr_rx = 0U;
+ break;
}
for (i = HW_ATL_B0_RINGS_MAX; i--;) {
- reg_tx_intr_moder_ctrl_set(self,
- PHAL_ATLANTIC_B0->itr_tx, i);
- reg_rx_intr_moder_ctrl_set(self,
- PHAL_ATLANTIC_B0->itr_rx, i);
+ reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+ reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
}
return aq_hw_err_from_flags(self);
@@ -939,6 +947,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
.hw_rss_set = hw_atl_b0_hw_rss_set,
.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
.hw_get_regs = hw_atl_utils_hw_get_regs,
+ .hw_update_stats = hw_atl_utils_update_stats,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index f3957e930340..9aa2c6edfca2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -16,7 +16,7 @@
#include "../aq_common.h"
-#define HW_ATL_B0_MTU_JUMBO (16000U)
+#define HW_ATL_B0_MTU_JUMBO 16352U
#define HW_ATL_B0_MTU 1514U
#define HW_ATL_B0_TX_RINGS 4U
@@ -139,6 +139,9 @@
#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U
+#define HW_ATL_INTR_MODER_MAX 0x1FF
+#define HW_ATL_INTR_MODER_MIN 0xFF
+
/* Hardware tx descriptor */
struct __packed hw_atl_txd_s {
u64 buf_addr;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 4f5ec9a0fbfb..1fe016fc4bc7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -255,6 +255,15 @@ err_exit:
return err;
}
+int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_mbox_header *pmbox)
+{
+ return hw_atl_utils_fw_downld_dwords(self,
+ PHAL_ATLANTIC->mbox_addr,
+ (u32 *)(void *)pmbox,
+ sizeof(*pmbox) / sizeof(u32));
+}
+
void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
struct hw_aq_atl_utils_mbox *pmbox)
{
@@ -267,9 +276,6 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
if (err < 0)
goto err_exit;
- if (pmbox != &PHAL_ATLANTIC->mbox)
- memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox));
-
if (IS_CHIP_FEATURE(REVISION_A0)) {
unsigned int mtu = self->aq_nic_cfg ?
self->aq_nic_cfg->mtu : 1514U;
@@ -299,17 +305,17 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
{
int err = 0;
u32 transaction_id = 0;
+ struct hw_aq_atl_utils_mbox_header mbox;
if (state == MPI_RESET) {
- hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
+ hw_atl_utils_mpi_read_mbox(self, &mbox);
- transaction_id = PHAL_ATLANTIC->mbox.transaction_id;
+ transaction_id = mbox.transaction_id;
AQ_HW_WAIT_FOR(transaction_id !=
- (hw_atl_utils_mpi_read_stats
- (self, &PHAL_ATLANTIC->mbox),
- PHAL_ATLANTIC->mbox.transaction_id),
- 1000U, 100U);
+ (hw_atl_utils_mpi_read_mbox(self, &mbox),
+ mbox.transaction_id),
+ 1000U, 100U);
if (err < 0)
goto err_exit;
}
@@ -351,8 +357,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
break;
default:
- link_status->mbps = 0U;
- break;
+ return -EBUSY;
}
}
@@ -493,16 +498,51 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
return 0;
}
+int hw_atl_utils_update_stats(struct aq_hw_s *self)
+{
+ struct hw_atl_s *hw_self = PHAL_ATLANTIC;
+ struct hw_aq_atl_utils_mbox mbox;
+
+ if (!self->aq_link_status.mbps)
+ return 0;
+
+ hw_atl_utils_mpi_read_stats(self, &mbox);
+
+#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
+ mbox.stats._N_ - hw_self->last_stats._N_)
+
+ AQ_SDELTA(uprc);
+ AQ_SDELTA(mprc);
+ AQ_SDELTA(bprc);
+ AQ_SDELTA(erpt);
+
+ AQ_SDELTA(uptc);
+ AQ_SDELTA(mptc);
+ AQ_SDELTA(bptc);
+ AQ_SDELTA(erpr);
+
+ AQ_SDELTA(ubrc);
+ AQ_SDELTA(ubtc);
+ AQ_SDELTA(mbrc);
+ AQ_SDELTA(mbtc);
+ AQ_SDELTA(bbrc);
+ AQ_SDELTA(bbtc);
+ AQ_SDELTA(dpc);
+
+#undef AQ_SDELTA
+
+ memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
+
+ return 0;
+}
+
int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
u64 *data, unsigned int *p_count)
{
- struct hw_atl_stats_s *stats = NULL;
+ struct hw_atl_s *hw_self = PHAL_ATLANTIC;
+ struct hw_atl_stats_s *stats = &hw_self->curr_stats;
int i = 0;
- hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
-
- stats = &PHAL_ATLANTIC->mbox.stats;
-
data[i] = stats->uprc + stats->mprc + stats->bprc;
data[++i] = stats->uprc;
data[++i] = stats->mprc;
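hw_atl_utils_update_stats() above keeps running totals by adding the delta between successive firmware snapshots (the AQ_SDELTA macro) instead of exporting raw counters. A minimal stand-alone model of that accumulation pattern; the 32-bit field width and the names are assumptions for illustration, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

struct stat_acc {
	uint64_t total;	/* monotonic value exposed to ethtool */
	uint32_t last;	/* previous raw snapshot from firmware */
};

/* Unsigned subtraction is taken modulo 2^32, so the delta stays correct
 * even if the raw counter wraps once between two snapshots.
 */
static void stat_acc_update(struct stat_acc *s, uint32_t raw)
{
	s->total += (uint32_t)(raw - s->last);
	s->last = raw;
}

int main(void)
{
	struct stat_acc rx_packets = { 0, 0 };

	stat_acc_update(&rx_packets, 100);
	stat_acc_update(&rx_packets, 0xFFFFFFF0u);	/* counter near its wrap point */
	stat_acc_update(&rx_packets, 20);		/* counter has wrapped around */
	printf("total=%llu\n", (unsigned long long)rx_packets.total);
	return 0;
}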
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index e0360a6b2202..c99cc690e425 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -115,19 +115,22 @@ struct __packed hw_aq_atl_utils_fw_rpc {
};
};
-struct __packed hw_aq_atl_utils_mbox {
+struct __packed hw_aq_atl_utils_mbox_header {
u32 version;
u32 transaction_id;
- int error;
+ u32 error;
+};
+
+struct __packed hw_aq_atl_utils_mbox {
+ struct hw_aq_atl_utils_mbox_header header;
struct hw_atl_stats_s stats;
};
struct __packed hw_atl_s {
struct aq_hw_s base;
- struct hw_aq_atl_utils_mbox mbox;
+ struct hw_atl_stats_s last_stats;
+ struct hw_atl_stats_s curr_stats;
u64 speed;
- u32 itr_tx;
- u32 itr_rx;
unsigned int chip_features;
u32 fw_ver_actual;
atomic_t dpc;
@@ -170,6 +173,9 @@ enum hal_atl_utils_fw_state_e {
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
+int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_mbox_header *pmbox);
+
void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
struct hw_aq_atl_utils_mbox *pmbox);
@@ -199,6 +205,8 @@ int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
+int hw_atl_utils_update_stats(struct aq_hw_s *self);
+
int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
u64 *data,
unsigned int *p_count);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index c3c53f6cd9e6..83eec9a8c275 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -432,6 +432,27 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}
+static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
+ u64 *tx_bytes, u64 *tx_packets)
+{
+ struct bcm_sysport_tx_ring *ring;
+ u64 bytes = 0, packets = 0;
+ unsigned int start;
+ unsigned int q;
+
+ for (q = 0; q < priv->netdev->num_tx_queues; q++) {
+ ring = &priv->tx_rings[q];
+ do {
+ start = u64_stats_fetch_begin_irq(&priv->syncp);
+ bytes = ring->bytes;
+ packets = ring->packets;
+ } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
+
+ *tx_bytes += bytes;
+ *tx_packets += packets;
+ }
+}
+
static void bcm_sysport_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -439,11 +460,16 @@ static void bcm_sysport_get_stats(struct net_device *dev,
struct bcm_sysport_stats64 *stats64 = &priv->stats64;
struct u64_stats_sync *syncp = &priv->syncp;
struct bcm_sysport_tx_ring *ring;
+ u64 tx_bytes = 0, tx_packets = 0;
unsigned int start;
int i, j;
- if (netif_running(dev))
+ if (netif_running(dev)) {
bcm_sysport_update_mib_counters(priv);
+ bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
+ stats64->tx_bytes = tx_bytes;
+ stats64->tx_packets = tx_packets;
+ }
for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
const struct bcm_sysport_stats *s;
@@ -461,12 +487,13 @@ static void bcm_sysport_get_stats(struct net_device *dev,
continue;
p += s->stat_offset;
- if (s->stat_sizeof == sizeof(u64))
+ if (s->stat_sizeof == sizeof(u64) &&
+ s->type == BCM_SYSPORT_STAT_NETDEV64) {
do {
start = u64_stats_fetch_begin_irq(syncp);
data[i] = *(u64 *)p;
} while (u64_stats_fetch_retry_irq(syncp, start));
- else
+ } else
data[i] = *(u32 *)p;
j++;
}
@@ -1716,27 +1743,12 @@ static void bcm_sysport_get_stats64(struct net_device *dev,
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_stats64 *stats64 = &priv->stats64;
- struct bcm_sysport_tx_ring *ring;
- u64 tx_packets = 0, tx_bytes = 0;
unsigned int start;
- unsigned int q;
netdev_stats_to_stats64(stats, &dev->stats);
- for (q = 0; q < dev->num_tx_queues; q++) {
- ring = &priv->tx_rings[q];
- do {
- start = u64_stats_fetch_begin_irq(&priv->syncp);
- tx_bytes = ring->bytes;
- tx_packets = ring->packets;
- } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
-
- stats->tx_bytes += tx_bytes;
- stats->tx_packets += tx_packets;
- }
-
- stats64->tx_bytes = stats->tx_bytes;
- stats64->tx_packets = stats->tx_packets;
+ bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
+ &stats->tx_packets);
do {
start = u64_stats_fetch_begin_irq(&priv->syncp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index aacec8bc19d5..dc5de275352a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -214,6 +214,8 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};
+static struct workqueue_struct *bnxt_pf_wq;
+
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
@@ -1024,12 +1026,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
return 0;
}
+static void bnxt_queue_sp_work(struct bnxt *bp)
+{
+ if (BNXT_PF(bp))
+ queue_work(bnxt_pf_wq, &bp->sp_task);
+ else
+ schedule_work(&bp->sp_task);
+}
+
+static void bnxt_cancel_sp_work(struct bnxt *bp)
+{
+ if (BNXT_PF(bp))
+ flush_workqueue(bnxt_pf_wq);
+ else
+ cancel_work_sync(&bp->sp_task);
+}
+
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
if (!rxr->bnapi->in_reset) {
rxr->bnapi->in_reset = true;
set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
}
rxr->rx_next_cons = 0xffff;
}
@@ -1717,7 +1735,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
default:
goto async_event_process_exit;
}
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
async_event_process_exit:
bnxt_ulp_async_events(bp, cmpl);
return 0;
@@ -1751,7 +1769,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
break;
case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -3448,6 +3466,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
}
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
+ int timeout)
+{
+ return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
+}
+
int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
int rc;
@@ -6327,7 +6351,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
if (link_re_init) {
+ mutex_lock(&bp->link_lock);
rc = bnxt_update_phy_setting(bp);
+ mutex_unlock(&bp->link_lock);
if (rc)
netdev_warn(bp->dev, "failed to update phy settings\n");
}
@@ -6647,7 +6673,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
vnic->rx_mask = mask;
set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
}
}
@@ -6920,7 +6946,7 @@ static void bnxt_tx_timeout(struct net_device *dev)
netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6952,7 +6978,7 @@ static void bnxt_timer(unsigned long data)
if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
}
bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
@@ -7025,30 +7051,28 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
bnxt_hwrm_port_qstats(bp);
- /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
- * must be the last functions to be called before exiting.
- */
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
- int rc = 0;
+ int rc;
+ mutex_lock(&bp->link_lock);
if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
&bp->sp_event))
bnxt_hwrm_phy_qcaps(bp);
- bnxt_rtnl_lock_sp(bp);
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- rc = bnxt_update_link(bp, true);
- bnxt_rtnl_unlock_sp(bp);
+ rc = bnxt_update_link(bp, true);
+ mutex_unlock(&bp->link_lock);
if (rc)
netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
rc);
}
if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
- bnxt_rtnl_lock_sp(bp);
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_get_port_module_status(bp);
- bnxt_rtnl_unlock_sp(bp);
+ mutex_lock(&bp->link_lock);
+ bnxt_get_port_module_status(bp);
+ mutex_unlock(&bp->link_lock);
}
+ /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
+ * must be the last functions to be called before exiting.
+ */
if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, false);
@@ -7433,7 +7457,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
spin_unlock_bh(&bp->ntp_fltr_lock);
set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
return new_fltr->sw_id;
@@ -7516,7 +7540,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
if (bp->vxlan_port_cnt == 1) {
bp->vxlan_port = ti->port;
set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
}
break;
case UDP_TUNNEL_TYPE_GENEVE:
@@ -7533,7 +7557,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
return;
}
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
}
static void bnxt_udp_tunnel_del(struct net_device *dev,
@@ -7572,7 +7596,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
return;
}
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
}
static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -7720,7 +7744,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
bnxt_shutdown_tc(bp);
- cancel_work_sync(&bp->sp_task);
+ bnxt_cancel_sp_work(bp);
bp->sp_event = 0;
bnxt_clear_int_mode(bp);
@@ -7748,6 +7772,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
rc);
return rc;
}
+ mutex_init(&bp->link_lock);
rc = bnxt_update_link(bp, false);
if (rc) {
@@ -7946,7 +7971,7 @@ static void bnxt_parse_log_pcie_link(struct bnxt *bp)
enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
- if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
+ if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
else
@@ -8138,8 +8163,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else
device_set_wakeup_capable(&pdev->dev, false);
- if (BNXT_PF(bp))
+ if (BNXT_PF(bp)) {
+ if (!bnxt_pf_wq) {
+ bnxt_pf_wq =
+ create_singlethread_workqueue("bnxt_pf_wq");
+ if (!bnxt_pf_wq) {
+ dev_err(&pdev->dev, "Unable to create workqueue.\n");
+ goto init_err_pci_clean;
+ }
+ }
bnxt_init_tc(bp);
+ }
rc = register_netdev(dev);
if (rc)
@@ -8375,4 +8409,17 @@ static struct pci_driver bnxt_pci_driver = {
#endif
};
-module_pci_driver(bnxt_pci_driver);
+static int __init bnxt_init(void)
+{
+ return pci_register_driver(&bnxt_pci_driver);
+}
+
+static void __exit bnxt_exit(void)
+{
+ pci_unregister_driver(&bnxt_pci_driver);
+ if (bnxt_pf_wq)
+ destroy_workqueue(bnxt_pf_wq);
+}
+
+module_init(bnxt_init);
+module_exit(bnxt_exit);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 7b888d4b2b55..c911e69ff25f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1290,6 +1290,10 @@ struct bnxt {
unsigned long *ntp_fltr_bmap;
int ntp_fltr_count;
+ /* To protect link related settings during link changes and
+ * ethtool settings changes.
+ */
+ struct mutex link_lock;
struct bnxt_link_info link_info;
struct ethtool_eee eee;
u32 lpi_tmr_lo;
@@ -1358,6 +1362,7 @@ void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
int hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index aa1f3a2c7a78..fed37cd9ae1d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
u8 *pri2cos = &resp->pri0_cos_queue_id;
int i, j;
@@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
}
}
}
+ mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -119,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
int rc, i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
+ }
data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
@@ -143,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
}
}
}
+ mutex_unlock(&bp->hwrm_cmd_lock);
return 0;
}
@@ -240,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
+ }
pri_mask = le32_to_cpu(resp->flags);
pfc->pfc_en = pri_mask;
+ mutex_unlock(&bp->hwrm_cmd_lock);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8eff05a3e0e4..3cbe771b3352 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1052,6 +1052,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
u32 ethtool_speed;
ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
+ mutex_lock(&bp->link_lock);
bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
@@ -1099,6 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
base->port = PORT_FIBRE;
}
base->phy_address = link_info->phy_addr;
+ mutex_unlock(&bp->link_lock);
return 0;
}
@@ -1190,6 +1192,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
if (!BNXT_SINGLE_PF(bp))
return -EOPNOTSUPP;
+ mutex_lock(&bp->link_lock);
if (base->autoneg == AUTONEG_ENABLE) {
BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
advertising);
@@ -1234,6 +1237,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
set_setting_exit:
+ mutex_unlock(&bp->link_lock);
return rc;
}
@@ -1805,7 +1809,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
req.dir_ordinal = cpu_to_le16(ordinal);
req.dir_ext = cpu_to_le16(ext);
req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
- rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc == 0) {
if (index)
*index = le16_to_cpu(output->dir_idx);
@@ -1814,6 +1819,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
if (data_length)
*data_length = le32_to_cpu(output->dir_data_length);
}
+ mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index d37925a8a65b..5ee18660bc33 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -502,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
int rc = 0, vfs_supported;
int min_rx_rings, min_tx_rings, min_rss_ctxs;
int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+ int avail_cp, avail_stat;
/* Check if we can enable requested num of vf's. At a minimum
* we require 1 RX 1 TX rings for each VF. In this minimum conf
@@ -509,6 +510,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
*/
vfs_supported = *num_vfs;
+ avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
+ avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
+ avail_cp = min_t(int, avail_cp, avail_stat);
+
while (vfs_supported) {
min_rx_rings = vfs_supported;
min_tx_rings = vfs_supported;
@@ -523,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
min_rx_rings)
rx_ok = 1;
}
- if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings)
+ if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
+ avail_cp < min_rx_rings)
rx_ok = 0;
- if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+ if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
+ avail_cp >= min_tx_rings)
tx_ok = 1;
if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
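The SR-IOV hunk adds completion rings and statistics contexts to the resources that bound how many VFs can be enabled, folding both into one limit (avail_cp = min(avail_cp, avail_stat)) checked inside the trial loop. A stand-alone sketch of the same search for the largest VF count that fits every free pool follows; the pool sizes are invented purely for illustration.

#include <stdio.h>

/* Returns the largest vf count such that each VF can get one unit
 * from every pool; pools[] holds the free units of each resource. */
static int max_supported_vfs(int requested, const int *pools, int npools)
{
        int vfs;

        for (vfs = requested; vfs > 0; vfs--) {
                int ok = 1, i;

                for (i = 0; i < npools; i++)
                        if (pools[i] < vfs)
                                ok = 0;
                if (ok)
                        return vfs;
        }
        return 0;
}

int main(void)
{
        /* rx rings, tx rings, vnics, rss ctxs, min(cp rings, stat ctxs) */
        int pools[] = { 16, 16, 12, 12, 10 };

        printf("supported VFs: %d\n",
               max_supported_vfs(16, pools, 5));   /* prints 10 */
        return 0;
}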
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index ccd699fb2d70..7dd3d131043a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -750,6 +750,10 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
{
int rc = 0;
+ if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
+ cls_flower->common.chain_index)
+ return -EOPNOTSUPP;
+
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index cec94bbb2ea5..8bc126a156e8 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1278,7 +1278,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
if (ret)
- return -ENOMEM;
+ goto error;
n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
for (i = 0, j = 0; i < cp->max_cid_space; i++) {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index e7f54948173f..5b19826a7e16 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1847,7 +1847,7 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
struct lio *lio = container_of(ptp, struct lio, ptp_info);
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
- ns = timespec_to_ns(ts);
+ ns = timespec64_to_ns(ts);
spin_lock_irqsave(&lio->ptp_lock, flags);
lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 49b80da51ba7..805ab45e9b5a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -565,8 +565,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
return true;
default:
bpf_warn_invalid_xdp_action(action);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(nic->netdev, prog, action);
+ /* fall through */
case XDP_DROP:
/* Check if it's a recycled page, if not
* unmap the DMA mapping.
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 38c7b21e5d63..ede1876a9a19 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -374,8 +374,8 @@ struct bufdesc_ex {
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
#define FEC_ENET_TS_TIMER ((uint)0x00008000)
-#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
-#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER)
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
+#define FEC_NAPI_IMASK FEC_ENET_MII
#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
/* ENET interrupt coalescing macro define */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 56f56d6ada9c..3dc2d771a222 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1559,14 +1559,14 @@ fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
if (int_events == 0)
return false;
- if (int_events & FEC_ENET_RXF)
+ if (int_events & FEC_ENET_RXF_0)
fep->work_rx |= (1 << 2);
if (int_events & FEC_ENET_RXF_1)
fep->work_rx |= (1 << 0);
if (int_events & FEC_ENET_RXF_2)
fep->work_rx |= (1 << 1);
- if (int_events & FEC_ENET_TXF)
+ if (int_events & FEC_ENET_TXF_0)
fep->work_tx |= (1 << 2);
if (int_events & FEC_ENET_TXF_1)
fep->work_tx |= (1 << 0);
@@ -1604,8 +1604,8 @@ fec_enet_interrupt(int irq, void *dev_id)
}
if (fep->ptp_clock)
- fec_ptp_check_pps_event(fep);
-
+ if (fec_ptp_check_pps_event(fep))
+ ret = IRQ_HANDLED;
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 59efbd605416..5bcb2238acb2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -37,20 +37,15 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
}
static int hnae3_match_n_instantiate(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev,
- bool is_reg, bool *matched)
+ struct hnae3_ae_dev *ae_dev, bool is_reg)
{
int ret;
- *matched = false;
-
/* check if this client matches the type of ae_dev */
if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
return 0;
}
- /* there is a match of client and dev */
- *matched = true;
/* now, (un-)instantiate client by calling lower layer */
if (is_reg) {
@@ -69,7 +64,6 @@ int hnae3_register_client(struct hnae3_client *client)
{
struct hnae3_client *client_tmp;
struct hnae3_ae_dev *ae_dev;
- bool matched;
int ret = 0;
mutex_lock(&hnae3_common_lock);
@@ -86,7 +80,7 @@ int hnae3_register_client(struct hnae3_client *client)
/* if the client could not be initialized on current port, for
* any error reasons, move on to next available port
*/
- ret = hnae3_match_n_instantiate(client, ae_dev, true, &matched);
+ ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed for port\n");
@@ -102,12 +96,11 @@ EXPORT_SYMBOL(hnae3_register_client);
void hnae3_unregister_client(struct hnae3_client *client)
{
struct hnae3_ae_dev *ae_dev;
- bool matched;
mutex_lock(&hnae3_common_lock);
/* un-initialize the client on every matched port */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
- hnae3_match_n_instantiate(client, ae_dev, false, &matched);
+ hnae3_match_n_instantiate(client, ae_dev, false);
}
list_del(&client->node);
@@ -124,7 +117,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
const struct pci_device_id *id;
struct hnae3_ae_dev *ae_dev;
struct hnae3_client *client;
- bool matched;
int ret = 0;
mutex_lock(&hnae3_common_lock);
@@ -151,13 +143,10 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
* initialize the figure out client instance
*/
list_for_each_entry(client, &hnae3_client_list, node) {
- ret = hnae3_match_n_instantiate(client, ae_dev, true,
- &matched);
+ ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed\n");
- if (matched)
- break;
}
}
@@ -175,7 +164,6 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
const struct pci_device_id *id;
struct hnae3_ae_dev *ae_dev;
struct hnae3_client *client;
- bool matched;
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_dev */
@@ -187,12 +175,8 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
/* check the client list for the match with this ae_dev type and
* un-initialize the figure out client instance
*/
- list_for_each_entry(client, &hnae3_client_list, node) {
- hnae3_match_n_instantiate(client, ae_dev, false,
- &matched);
- if (matched)
- break;
- }
+ list_for_each_entry(client, &hnae3_client_list, node)
+ hnae3_match_n_instantiate(client, ae_dev, false);
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
@@ -212,7 +196,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- bool matched;
int ret = 0;
mutex_lock(&hnae3_common_lock);
@@ -246,13 +229,10 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
* initialize the figure out client instance
*/
list_for_each_entry(client, &hnae3_client_list, node) {
- ret = hnae3_match_n_instantiate(client, ae_dev, true,
- &matched);
+ ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed\n");
- if (matched)
- break;
}
out_err:
@@ -270,7 +250,6 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- bool matched;
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_algo */
@@ -279,12 +258,8 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
if (!id)
continue;
- list_for_each_entry(client, &hnae3_client_list, node) {
- hnae3_match_n_instantiate(client, ae_dev, false,
- &matched);
- if (matched)
- break;
- }
+ list_for_each_entry(client, &hnae3_client_list, node)
+ hnae3_match_n_instantiate(client, ae_dev, false);
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index b2f28ae81273..1a01cadfe5f3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -49,7 +49,17 @@
#define HNAE3_CLASS_NAME_SIZE 16
#define HNAE3_DEV_INITED_B 0x0
-#define HNAE_DEV_SUPPORT_ROCE_B 0x1
+#define HNAE3_DEV_SUPPORT_ROCE_B 0x1
+#define HNAE3_DEV_SUPPORT_DCB_B 0x2
+
+#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
+ BIT(HNAE3_DEV_SUPPORT_ROCE_B))
+
+#define hnae3_dev_roce_supported(hdev) \
+ hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+
+#define hnae3_dev_dcb_supported(hdev) \
+ hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -366,12 +376,12 @@ struct hnae3_ae_algo {
struct hnae3_tc_info {
u16 tqp_offset; /* TQP offset from base TQP */
u16 tqp_count; /* Total TQPs */
- u8 up; /* user priority */
u8 tc; /* TC index */
bool enable; /* If this TC is enabled or not */
};
#define HNAE3_MAX_TC 8
+#define HNAE3_MAX_USER_PRIO 8
struct hnae3_knic_private_info {
struct net_device *netdev; /* Set by KNIC client when init instance */
u16 rss_size; /* Allocated RSS queues */
@@ -379,6 +389,7 @@ struct hnae3_knic_private_info {
u16 num_desc;
u8 num_tc; /* Total number of enabled TCs */
+ u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */
u16 num_tqps; /* total number of TQPs in this handle */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 91ae0135ee50..758cf3948131 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -238,7 +238,7 @@ struct hclge_tqp_map {
u8 rsv[18];
};
-#define HCLGE_VECTOR_ELEMENTS_PER_CMD 11
+#define HCLGE_VECTOR_ELEMENTS_PER_CMD 10
enum hclge_int_type {
HCLGE_INT_TX,
@@ -252,8 +252,12 @@ struct hclge_ctrl_vector_chain {
#define HCLGE_INT_TYPE_S 0
#define HCLGE_INT_TYPE_M 0x3
#define HCLGE_TQP_ID_S 2
-#define HCLGE_TQP_ID_M (0x3fff << HCLGE_TQP_ID_S)
+#define HCLGE_TQP_ID_M (0x7ff << HCLGE_TQP_ID_S)
+#define HCLGE_INT_GL_IDX_S 13
+#define HCLGE_INT_GL_IDX_M (0x3 << HCLGE_INT_GL_IDX_S)
__le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
+ u8 vfid;
+ u8 rsv;
};
#define HCLGE_TC_NUM 8
@@ -266,7 +270,8 @@ struct hclge_tx_buff_alloc {
struct hclge_rx_priv_buff {
__le16 buf_num[HCLGE_TC_NUM];
- u8 rsv[8];
+ __le16 shared_buf;
+ u8 rsv[6];
};
struct hclge_query_version {
@@ -684,6 +689,7 @@ struct hclge_reset_tqp_queue {
#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
+#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
#define HCLGE_TYPE_CRQ 0
#define HCLGE_TYPE_CSQ 1
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index bb45365fb817..c1cdbfd83bdb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -46,17 +46,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
- /* Required last entry */
- {0, }
-};
-
-static const struct pci_device_id roce_pci_tbl[] = {
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
- /* Required last entry */
+ /* required last entry */
{0, }
};
@@ -894,7 +884,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->num_tqps = __le16_to_cpu(req->tqp_num);
hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
- if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) {
+ if (hnae3_dev_roce_supported(hdev)) {
hdev->num_roce_msix =
hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
@@ -1063,9 +1053,9 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->base_tqp_pid = 0;
hdev->rss_size_max = 1;
hdev->rx_buf_len = cfg.rx_buf_len;
- for (i = 0; i < ETH_ALEN; i++)
- hdev->hw.mac.mac_addr[i] = cfg.mac_addr[i];
+ ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
hdev->hw.mac.media_type = cfg.media_type;
+ hdev->hw.mac.phy_addr = cfg.phy_addr;
hdev->num_desc = cfg.tqp_desc_num;
hdev->tm_info.num_pg = 1;
hdev->tm_info.num_tc = cfg.tc_num;
@@ -1454,7 +1444,11 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
tc_num = hclge_get_tc_num(hdev);
pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
- shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
+ if (hnae3_dev_dcb_supported(hdev))
+ shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
+ else
+ shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
+
shared_buf_tc = pfc_enable_num * hdev->mps +
(tc_num - pfc_enable_num) * hdev->mps / 2 +
hdev->mps;
@@ -1495,6 +1489,16 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
struct hclge_priv_buf *priv;
int i;
+ /* When DCB is not supported, rx private
+ * buffer is not allocated.
+ */
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ if (!hclge_is_rx_buf_ok(hdev, rx_all))
+ return -ENOMEM;
+
+ return 0;
+ }
+
/* step 1, try to alloc private buffer for all enabled tc */
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &hdev->priv_buf[i];
@@ -1510,6 +1514,11 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
priv->wl.high = 2 * hdev->mps;
priv->buf_size = priv->wl.high;
}
+ } else {
+ priv->enable = 0;
+ priv->wl.low = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
}
}
@@ -1522,8 +1531,15 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &hdev->priv_buf[i];
- if (hdev->hw_tc_map & BIT(i))
- priv->enable = 1;
+ priv->enable = 0;
+ priv->wl.low = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ priv->enable = 1;
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
priv->wl.low = 128;
@@ -1616,6 +1632,10 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B);
}
+ req->shared_buf =
+ cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
+ (1 << HCLGE_TC0_PRI_BUF_EN_B));
+
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -1782,18 +1802,22 @@ int hclge_buffer_alloc(struct hclge_dev *hdev)
return ret;
}
- ret = hclge_rx_priv_wl_config(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "could not configure rx private waterline %d\n", ret);
- return ret;
- }
+ if (hnae3_dev_dcb_supported(hdev)) {
+ ret = hclge_rx_priv_wl_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not configure rx private waterline %d\n",
+ ret);
+ return ret;
+ }
- ret = hclge_common_thrd_config(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "could not configure common threshold %d\n", ret);
- return ret;
+ ret = hclge_common_thrd_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not configure common threshold %d\n",
+ ret);
+ return ret;
+ }
}
ret = hclge_common_wl_config(hdev);
@@ -2582,6 +2606,7 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
u16 tc_valid[HCLGE_MAX_TC_NUM];
u16 tc_size[HCLGE_MAX_TC_NUM];
u32 *rss_indir = NULL;
+ u16 rss_size = 0, roundup_size;
const u8 *key;
int i, ret, j;
@@ -2596,7 +2621,13 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
vport[j].rss_indirection_tbl[i] =
- i % hdev->rss_size_max;
+ i % vport[j].alloc_rss_size;
+
+ /* vport 0 is for PF */
+ if (j != 0)
+ continue;
+
+ rss_size = vport[j].alloc_rss_size;
rss_indir[i] = vport[j].rss_indirection_tbl[i];
}
}
@@ -2613,42 +2644,32 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
if (ret)
goto err;
+ /* Each TC has the same queue size, and the tc_size set to hardware is
+ * the log2 of the roundup power of two of rss_size; the actual queue
+ * size is limited by the indirection table.
+ */
+ if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
+ dev_err(&hdev->pdev->dev,
+ "Configure rss tc size failed, invalid TC_SIZE = %d\n",
+ rss_size);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- if (hdev->hw_tc_map & BIT(i))
- tc_valid[i] = 1;
- else
- tc_valid[i] = 0;
+ tc_valid[i] = 0;
- switch (hdev->rss_size_max) {
- case HCLGE_RSS_TC_SIZE_0:
- tc_size[i] = 0;
- break;
- case HCLGE_RSS_TC_SIZE_1:
- tc_size[i] = 1;
- break;
- case HCLGE_RSS_TC_SIZE_2:
- tc_size[i] = 2;
- break;
- case HCLGE_RSS_TC_SIZE_3:
- tc_size[i] = 3;
- break;
- case HCLGE_RSS_TC_SIZE_4:
- tc_size[i] = 4;
- break;
- case HCLGE_RSS_TC_SIZE_5:
- tc_size[i] = 5;
- break;
- case HCLGE_RSS_TC_SIZE_6:
- tc_size[i] = 6;
- break;
- case HCLGE_RSS_TC_SIZE_7:
- tc_size[i] = 7;
- break;
- default:
- break;
- }
- tc_offset[i] = hdev->rss_size_max * i;
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = rss_size * i;
}
+
ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
err:
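The rewritten RSS setup drops the switch on rss_size_max: every enabled TC now gets tc_size = ilog2(roundup_pow_of_two(rss_size)), so the hardware is programmed with the next power of two while the indirection table bounds the real queue count. A worked stand-alone version of that computation, with plain-C stand-ins for the kernel's roundup_pow_of_two() and ilog2() helpers:

#include <stdio.h>

/* Plain-C stand-ins for the kernel helpers of the same names. */
static unsigned int roundup_pow_of_two(unsigned int n)
{
        unsigned int p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

static unsigned int ilog2(unsigned int n)
{
        unsigned int l = 0;

        while (n >>= 1)
                l++;
        return l;
}

int main(void)
{
        unsigned int rss_size[] = { 1, 3, 6, 8, 16 };
        unsigned int i;

        for (i = 0; i < 5; i++) {
                unsigned int tc_size = ilog2(roundup_pow_of_two(rss_size[i]));

                /* e.g. rss_size 6 -> rounded to 8 -> tc_size 3 */
                printf("rss_size %u -> tc_size %u\n", rss_size[i], tc_size);
        }
        return 0;
}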
@@ -2679,7 +2700,11 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
HCLGE_TQP_ID_S, node->tqp_index);
+ hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
+ req->vfid = vport->vport_id;
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -2763,8 +2788,12 @@ static int hclge_unmap_ring_from_vector(
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
HCLGE_TQP_ID_S, node->tqp_index);
+ hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
+ req->vfid = vport->vport_id;
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -2778,7 +2807,7 @@ static int hclge_unmap_ring_from_vector(
}
i = 0;
hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_ADD_RING_TO_VECTOR,
+ HCLGE_OPC_DEL_RING_TO_VECTOR,
false);
req->int_vector_id = vector_id;
}
@@ -3665,6 +3694,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_VLAN_TYPE_VF_TABLE 0
#define HCLGE_VLAN_TYPE_PORT_TABLE 1
+ struct hnae3_handle *handle;
int ret;
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
@@ -3674,8 +3704,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
true);
+ if (ret)
+ return ret;
- return ret;
+ handle = &hdev->vport[0].nic;
+ return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
@@ -3920,8 +3953,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
goto err;
if (hdev->roce_client &&
- hnae_get_bit(hdev->ae_dev->flag,
- HNAE_DEV_SUPPORT_ROCE_B)) {
+ hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
ret = hclge_init_roce_base_info(vport);
@@ -3944,8 +3976,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
break;
case HNAE3_CLIENT_ROCE:
- if (hnae_get_bit(hdev->ae_dev->flag,
- HNAE_DEV_SUPPORT_ROCE_B)) {
+ if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_client = client;
vport->roce.client = client;
}
@@ -4057,7 +4088,6 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
- const struct pci_device_id *id;
struct hclge_dev *hdev;
int ret;
@@ -4072,10 +4102,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->ae_dev = ae_dev;
ae_dev->priv = hdev;
- id = pci_match_id(roce_pci_tbl, ae_dev->pdev);
- if (id)
- hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1);
-
ret = hclge_pci_init(hdev);
if (ret) {
dev_err(&pdev->dev, "PCI init failed\n");
@@ -4138,12 +4164,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- ret = hclge_rss_init_hw(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
- return ret;
- }
-
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -4156,6 +4176,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
+ return ret;
+ }
+
setup_timer(&hdev->service_timer, hclge_service_timer,
(unsigned long)hdev);
INIT_WORK(&hdev->service_task, hclge_service_task);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index edb10ad075eb..9fcfd9395424 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -176,7 +176,6 @@ struct hclge_pg_info {
struct hclge_tc_info {
u8 tc_id;
u8 tc_sch_mode; /* 0: sp; 1: dwrr */
- u8 up;
u8 pgid;
u32 bw_limit;
};
@@ -197,6 +196,7 @@ struct hclge_tm_info {
u8 num_tc;
u8 num_pg; /* It must be 1 if vNET-Base schd */
u8 pg_dwrr[HCLGE_PG_NUM];
+ u8 prio_tc[HNAE3_MAX_USER_PRIO];
struct hclge_pg_info pg_info[HCLGE_PG_NUM];
struct hclge_tc_info tc_info[HNAE3_MAX_TC];
enum hclge_fc_mode fc_mode;
@@ -477,6 +477,7 @@ struct hclge_vport {
u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
/* User configured lookup table entries */
u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
+ u16 alloc_rss_size;
u16 qs_offset;
u16 bw_limit; /* VSI BW Limit (0 = disabled) */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 1c577d268f00..73a75d7cc551 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -128,9 +128,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
u8 tc;
- for (tc = 0; tc < hdev->tm_info.num_tc; tc++)
- if (hdev->tm_info.tc_info[tc].up == pri_id)
- break;
+ tc = hdev->tm_info.prio_tc[pri_id];
if (tc >= hdev->tm_info.num_tc)
return -EINVAL;
@@ -158,7 +156,7 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
- for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) {
+ for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
ret = hclge_fill_pri_array(hdev, pri, pri_id);
if (ret)
return ret;
@@ -280,11 +278,11 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pg_id = pg_id;
- hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
- hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
- hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
- hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
- hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
+ hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
+ hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
+ hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
+ hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
+ hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -307,11 +305,11 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pri_id = pri_id;
- hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
- hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
- hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
- hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
- hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
+ hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
+ hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
+ hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
+ hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
+ hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -397,6 +395,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->num_tqps / kinfo->num_tc);
vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
vport->dwrr = 100; /* 100 percent as init */
+ vport->alloc_rss_size = kinfo->rss_size;
for (i = 0; i < kinfo->num_tc; i++) {
if (hdev->hw_tc_map & BIT(i)) {
@@ -404,16 +403,17 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
kinfo->tc_info[i].tqp_count = kinfo->rss_size;
kinfo->tc_info[i].tc = i;
- kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up;
} else {
/* Set to default queue if TC is disabled */
kinfo->tc_info[i].enable = false;
kinfo->tc_info[i].tqp_offset = 0;
kinfo->tc_info[i].tqp_count = 1;
kinfo->tc_info[i].tc = 0;
- kinfo->tc_info[i].up = 0;
}
}
+
+ memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
+ FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
@@ -435,12 +435,15 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
for (i = 0; i < hdev->tm_info.num_tc; i++) {
hdev->tm_info.tc_info[i].tc_id = i;
hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
- hdev->tm_info.tc_info[i].up = i;
hdev->tm_info.tc_info[i].pgid = 0;
hdev->tm_info.tc_info[i].bw_limit =
hdev->tm_info.pg_info[0].bw_limit;
}
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ hdev->tm_info.prio_tc[i] =
+ (i >= hdev->tm_info.num_tc) ? 0 : i;
+
hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
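Here the per-TC "up" field gives way to a prio_tc[] table indexed by user priority: hclge_fill_pri_array() becomes a direct lookup and hclge_tm_tc_info_init() seeds the table as an identity map for priorities below num_tc, with the rest falling back to TC 0. A tiny stand-alone version of that initialization and lookup; the num_tc value is just an example.

#include <stdio.h>

#define MAX_USER_PRIO 8

int main(void)
{
        unsigned char prio_tc[MAX_USER_PRIO];
        unsigned int num_tc = 4;       /* example: 4 enabled TCs */
        unsigned int prio;

        /* identity map for valid priorities, TC 0 for the remainder */
        for (prio = 0; prio < MAX_USER_PRIO; prio++)
                prio_tc[prio] = (prio >= num_tc) ? 0 : prio;

        for (prio = 0; prio < MAX_USER_PRIO; prio++)
                printf("prio %u -> tc %u\n", prio, prio_tc[prio]);
        return 0;
}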
@@ -976,6 +979,10 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
if (ret)
return ret;
+ /* Only DCB-supported dev supports qset back pressure setting */
+ if (!hnae3_dev_dcb_supported(hdev))
+ return 0;
+
for (i = 0; i < hdev->tm_info.num_tc; i++) {
ret = hclge_tm_qs_bp_cfg(hdev, i);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 7e67337dfaf2..85158b0d73fe 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -94,10 +94,10 @@ struct hclge_bp_to_qs_map_cmd {
u32 rsvd1;
};
-#define hclge_tm_set_feild(dest, string, val) \
+#define hclge_tm_set_field(dest, string, val) \
hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH), val)
-#define hclge_tm_get_feild(src, string) \
+#define hclge_tm_get_field(src, string) \
hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
index 1c3e29447891..35369e1c8036 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
@@ -41,11 +41,16 @@ static struct hnae3_client client;
static const struct pci_device_id hns3_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
};
@@ -1348,6 +1353,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
ae_dev->pdev = pdev;
+ ae_dev->flag = ent->driver_data;
ae_dev->dev_type = HNAE3_DEV_KNIC;
pci_set_drvdata(pdev, ae_dev);
@@ -2705,10 +2711,11 @@ static void hns3_init_mac_addr(struct net_device *netdev)
eth_hw_addr_random(netdev);
dev_warn(priv->dev, "using random MAC address %pM\n",
netdev->dev_addr);
- /* Also copy this new MAC address into hdev */
- if (h->ae_algo->ops->set_mac_addr)
- h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
}
+
+ if (h->ae_algo->ops->set_mac_addr)
+ h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
+
}
static void hns3_nic_set_priv_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 2c74baa2398a..fff09dcf9e34 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
unsigned long flags;
MAL_DBG2(mal, "poll(%d)" NL, budget);
- again:
+
/* Process TX skbs */
list_for_each(l, &mal->poll_list) {
struct mal_commac *mc =
@@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget)
spin_lock_irqsave(&mal->lock, flags);
mal_disable_eob_irq(mal);
spin_unlock_irqrestore(&mal->lock, flags);
- goto again;
}
mc->ops->poll_tx(mc->dev);
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index cb8182f4fdfa..c66abd476023 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1093,11 +1093,12 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
* places them in a descriptor array, scrq_arr
*/
-static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
- union sub_crq *scrq_arr)
+static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+ union sub_crq *scrq_arr)
{
union sub_crq hdr_desc;
int tmp_len = len;
+ int num_descs = 0;
u8 *data, *cur;
int tmp;
@@ -1126,7 +1127,10 @@ static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
tmp_len -= tmp;
*scrq_arr = hdr_desc;
scrq_arr++;
+ num_descs++;
}
+
+ return num_descs;
}
/**
@@ -1144,16 +1148,12 @@ static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
int *num_entries, u8 hdr_field)
{
int hdr_len[3] = {0, 0, 0};
- int tot_len, len;
+ int tot_len;
u8 *hdr_data = txbuff->hdr_data;
tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
txbuff->hdr_data);
- len = tot_len;
- len -= 24;
- if (len > 0)
- num_entries += len % 29 ? len / 29 + 1 : len / 29;
- create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
+ *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
txbuff->indir_arr + 1);
}
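The ibmvnic change makes create_hdr_descs() return how many descriptors it actually built, and build_hdr_descs_arr() adds that to *num_entries instead of re-deriving the count from the header length. A minimal sketch of the same idea, chunking a header buffer into fixed-size descriptors and returning the count; the 29-byte payload size mirrors the driver's constant, everything else is illustrative.

#include <stdio.h>
#include <string.h>

#define DESC_DATA_LEN 29   /* payload bytes carried per descriptor */

struct desc {
        char data[DESC_DATA_LEN];
        int  len;
};

/* Splits len bytes of hdr into descriptors and returns how many were built,
 * so the caller can accumulate the count instead of re-deriving it. */
static int create_hdr_descs(const char *hdr, int len, struct desc *out)
{
        int num_descs = 0;

        while (len > 0) {
                int chunk = len < DESC_DATA_LEN ? len : DESC_DATA_LEN;

                memcpy(out[num_descs].data, hdr, chunk);
                out[num_descs].len = chunk;
                hdr += chunk;
                len -= chunk;
                num_descs++;
        }
        return num_descs;
}

int main(void)
{
        char hdr[70] = { 0 };
        struct desc descs[8];
        int num_entries = 1;           /* the first entry describes the frame itself */

        num_entries += create_hdr_descs(hdr, sizeof(hdr), descs);
        printf("num_entries = %d\n", num_entries);   /* 1 + 3 = 4 */
        return 0;
}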
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 57505b1df98d..d591b3e6bd7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -298,7 +298,7 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
}
/**
- * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking
+ * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 1519dfb851d0..2756131495f0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1038,6 +1038,32 @@ reset_latency:
}
/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *old_buff)
+{
+ struct i40e_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
+
+ new_buff = &rx_ring->rx_bi[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
+/**
* i40e_rx_is_programming_status - check for programming status descriptor
* @qw: qword representing status_error_len in CPU ordering
*
@@ -1071,15 +1097,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc,
u64 qw)
{
- u32 ntc = rx_ring->next_to_clean + 1;
+ struct i40e_rx_buffer *rx_buffer;
+ u32 ntc = rx_ring->next_to_clean;
u8 id;
/* fetch, update, and store next to clean */
+ rx_buffer = &rx_ring->rx_bi[ntc++];
ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring->next_to_clean = ntc;
prefetch(I40E_RX_DESC(rx_ring, ntc));
+ /* place unused page back on the ring */
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
+ rx_ring->rx_stats.page_reuse_count++;
+
+ /* clear contents of buffer_info */
+ rx_buffer->page = NULL;
+
id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
@@ -1639,32 +1674,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
}
/**
- * i40e_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *old_buff)
-{
- struct i40e_rx_buffer *new_buff;
- u16 nta = rx_ring->next_to_alloc;
-
- new_buff = &rx_ring->rx_bi[nta];
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* transfer page from old buffer to new buffer */
- new_buff->dma = old_buff->dma;
- new_buff->page = old_buff->page;
- new_buff->page_offset = old_buff->page_offset;
- new_buff->pagecnt_bias = old_buff->pagecnt_bias;
-}
-
-/**
* i40e_page_is_reusable - check if any reuse is possible
* @page: page struct to check
*
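In i40e_clean_programming_status() the buffer that carried the programming-status descriptor is now recycled the same way the data path does it: advance next_to_clean, hand its page to the slot at next_to_alloc via i40e_reuse_rx_page(), and clear the buffer_info entry. A small stand-alone sketch of that ring bookkeeping, with an int standing in for the page pointer; all names and sizes here are illustrative only.

#include <stdio.h>

#define RING_COUNT 8

struct ring {
        unsigned int next_to_clean;
        unsigned int next_to_alloc;
        int page_id[RING_COUNT];       /* stands in for rx_bi[].page */
};

/* Advance an index with wrap-around, as the driver does for ntc/nta. */
static unsigned int ring_next(unsigned int idx)
{
        idx++;
        return idx < RING_COUNT ? idx : 0;
}

/* Recycle the page of the just-cleaned slot into the next slot to allocate. */
static void reuse_rx_page(struct ring *r, int page)
{
        r->page_id[r->next_to_alloc] = page;
        r->next_to_alloc = ring_next(r->next_to_alloc);
}

int main(void)
{
        struct ring r = { .next_to_clean = 7, .next_to_alloc = 2 };
        int page;

        r.page_id[7] = 42;

        /* clean slot 7, then hand its page to slot 2 */
        page = r.page_id[r.next_to_clean];
        r.page_id[r.next_to_clean] = -1;          /* clear buffer_info */
        r.next_to_clean = ring_next(r.next_to_clean);
        reuse_rx_page(&r, page);

        printf("ntc=%u nta=%u page@2=%d\n",
               r.next_to_clean, r.next_to_alloc, r.page_id[2]);
        return 0;
}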
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 523f9d05a810..8a32eb7d47b9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -175,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
**/
static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
-#ifndef CONFIG_SPARC
- u32 regval;
- u32 i;
-#endif
s32 ret_val;
ret_val = ixgbe_start_hw_generic(hw);
-
-#ifndef CONFIG_SPARC
- /* Disable relaxed ordering */
- for (i = 0; ((i < hw->mac.max_tx_queues) &&
- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
- }
-
- for (i = 0; ((i < hw->mac.max_rx_queues) &&
- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
- IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
- }
-#endif
if (ret_val)
return ret_val;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 2c19070d2a0b..6e6ab6f6875e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -366,25 +366,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
}
IXGBE_WRITE_FLUSH(hw);
-#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
- /* Disable relaxed ordering */
- for (i = 0; i < hw->mac.max_tx_queues; i++) {
- u32 regval;
-
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
- }
-
- for (i = 0; i < hw->mac.max_rx_queues; i++) {
- u32 regval;
-
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
- IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
- }
-#endif
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 72c565712a5f..c3e7a8191128 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1048,7 +1048,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *temp_ring;
- int i, err = 0;
+ int i, j, err = 0;
u32 new_rx_count, new_tx_count;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -1085,8 +1085,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
}
/* allocate temporary buffer to store rings in */
- i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
- i = max_t(int, i, adapter->num_xdp_queues);
+ i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
+ adapter->num_rx_queues);
temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
if (!temp_ring) {
@@ -1118,8 +1118,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
}
}
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- memcpy(&temp_ring[i], adapter->xdp_ring[i],
+ for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+ memcpy(&temp_ring[i], adapter->xdp_ring[j],
sizeof(struct ixgbe_ring));
temp_ring[i].count = new_tx_count;
@@ -1139,10 +1139,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
memcpy(adapter->tx_ring[i], &temp_ring[i],
sizeof(struct ixgbe_ring));
}
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- ixgbe_free_tx_resources(adapter->xdp_ring[i]);
+ for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+ ixgbe_free_tx_resources(adapter->xdp_ring[j]);
- memcpy(adapter->xdp_ring[i], &temp_ring[i],
+ memcpy(adapter->xdp_ring[j], &temp_ring[i],
sizeof(struct ixgbe_ring));
}
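ixgbe_set_ringparam() now sizes the temporary buffer for num_tx_queues + num_xdp_queues, since the Tx and XDP rings are staged back-to-back in it, and walks the XDP rings with their own index j while i keeps advancing through the shared array. A minimal sketch of that contiguous staging; the ring counts here are made up for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ring { int count; };

int main(void)
{
        struct ring tx[3] = { {64}, {64}, {64} };
        struct ring xdp[2] = { {64}, {64} };
        int num_tx = 3, num_xdp = 2, num_rx = 4;
        int new_tx_count = 128;
        struct ring *temp;
        int i, j;

        /* tx and xdp rings are staged back-to-back, so size for their sum */
        i = num_tx + num_xdp;
        if (num_rx > i)
                i = num_rx;
        temp = malloc(i * sizeof(*temp));
        if (!temp)
                return 1;

        for (i = 0; i < num_tx; i++) {
                memcpy(&temp[i], &tx[i], sizeof(*temp));
                temp[i].count = new_tx_count;
        }
        for (j = 0; j < num_xdp; j++, i++) {
                memcpy(&temp[i], &xdp[j], sizeof(*temp));
                temp[i].count = new_tx_count;
        }

        printf("staged %d rings, last count %d\n", i, temp[i - 1].count);
        free(temp);
        return 0;
}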
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d962368d08d0..4d76afd13868 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4881,7 +4881,7 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
return;
- vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask;
+ vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
@@ -8529,6 +8529,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
return ixgbe_ptp_set_ts_config(adapter, req);
case SIOCGHWTSTAMP:
return ixgbe_ptp_get_ts_config(adapter, req);
+ case SIOCGMIIPHY:
+ if (!adapter->hw.phy.ops.read_reg)
+ return -EOPNOTSUPP;
+ /* fall through */
default:
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index dd0ee2691c86..9c86cb7cb988 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -333,7 +333,7 @@
#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1)
#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
-#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
+#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4)
#define MVPP2_GMAC_DISABLE_PADDING BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
@@ -676,6 +676,7 @@ enum mvpp2_tag_type {
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
+#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17)
#define MVPP2_PRS_RI_UDF3_MASK 0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
@@ -792,6 +793,7 @@ struct mvpp2 {
struct clk *pp_clk;
struct clk *gop_clk;
struct clk *mg_clk;
+ struct clk *axi_clk;
/* List of pointers to port structures */
struct mvpp2_port **port_list;
@@ -2315,7 +2317,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
(proto != IPPROTO_IGMP))
return -EINVAL;
- /* Fragmented packet */
+ /* Not fragmented packet */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
MVPP2_PE_LAST_FREE_TID);
if (tid < 0)
@@ -2334,8 +2336,12 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
MVPP2_PRS_IPV4_DIP_AI_BIT);
- mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
- ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+ mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
+ MVPP2_PRS_TCAM_PROTO_MASK_L);
+ mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
+ MVPP2_PRS_TCAM_PROTO_MASK);
mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
@@ -2346,7 +2352,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
mvpp2_prs_hw_write(priv, &pe);
- /* Not fragmented packet */
+ /* Fragmented packet */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
MVPP2_PE_LAST_FREE_TID);
if (tid < 0)
@@ -2358,8 +2364,11 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
- mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
- mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
+ mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
+ ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
+ mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
/* Update shadow table and hw entry */
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
@@ -4591,7 +4600,6 @@ static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
- val |= MVPP2_GMAC_PORT_RGMII_MASK;
}
writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
@@ -7496,7 +7504,7 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
struct device_node *port_node,
- struct mvpp2 *priv)
+ struct mvpp2 *priv, int index)
{
struct device_node *phy_node;
struct phy *comphy;
@@ -7670,7 +7678,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
- priv->port_list[id] = port;
+ priv->port_list[index] = port;
return 0;
err_free_port_pcpu:
@@ -7963,6 +7971,18 @@ static int mvpp2_probe(struct platform_device *pdev)
err = clk_prepare_enable(priv->mg_clk);
if (err < 0)
goto err_gop_clk;
+
+ priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
+ if (IS_ERR(priv->axi_clk)) {
+ err = PTR_ERR(priv->axi_clk);
+ if (err == -EPROBE_DEFER)
+ goto err_gop_clk;
+ priv->axi_clk = NULL;
+ } else {
+ err = clk_prepare_enable(priv->axi_clk);
+ if (err < 0)
+ goto err_gop_clk;
+ }
}
/* Get system's tclk rate */
@@ -8005,16 +8025,19 @@ static int mvpp2_probe(struct platform_device *pdev)
}
/* Initialize ports */
+ i = 0;
for_each_available_child_of_node(dn, port_node) {
- err = mvpp2_port_probe(pdev, port_node, priv);
+ err = mvpp2_port_probe(pdev, port_node, priv, i);
if (err < 0)
goto err_mg_clk;
+ i++;
}
platform_set_drvdata(pdev, priv);
return 0;
err_mg_clk:
+ clk_disable_unprepare(priv->axi_clk);
if (priv->hw_version == MVPP22)
clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
@@ -8052,6 +8075,7 @@ static int mvpp2_remove(struct platform_device *pdev)
aggr_txq->descs_dma);
}
+ clk_disable_unprepare(priv->axi_clk);
clk_disable_unprepare(priv->mg_clk);
clk_disable_unprepare(priv->pp_clk);
clk_disable_unprepare(priv->gop_clk);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 1e3a6c3e4132..80eef4163f52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -139,7 +139,7 @@ TRACE_EVENT(mlx5_fs_del_fg,
{MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"}
TRACE_EVENT(mlx5_fs_set_fte,
- TP_PROTO(const struct fs_fte *fte, bool new_fte),
+ TP_PROTO(const struct fs_fte *fte, int new_fte),
TP_ARGS(fte, new_fte),
TP_STRUCT__entry(
__field(const struct fs_fte *, fte)
@@ -149,7 +149,7 @@ TRACE_EVENT(mlx5_fs_set_fte,
__field(u32, action)
__field(u32, flow_tag)
__field(u8, mask_enable)
- __field(bool, new_fte)
+ __field(int, new_fte)
__array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
__array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
__array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index f11fd07ac4dd..850cdc980ab5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -291,7 +291,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
priv->fs.vlan.filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_del_any_vid_rules(priv);
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
@@ -302,7 +302,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
priv->fs.vlan.filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_add_any_vid_rules(priv);
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index dfc29720ab77..cc11bbbd0309 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -184,7 +184,6 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
struct mlx5e_sw_stats temp, *s = &temp;
struct mlx5e_rq_stats *rq_stats;
struct mlx5e_sq_stats *sq_stats;
- u64 tx_offload_none = 0;
int i, j;
memset(s, 0, sizeof(*s));
@@ -199,6 +198,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
s->rx_xdp_drop += rq_stats->xdp_drop;
s->rx_xdp_tx += rq_stats->xdp_tx;
@@ -229,14 +229,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->tx_queue_dropped += sq_stats->dropped;
s->tx_xmit_more += sq_stats->xmit_more;
s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
- tx_offload_none += sq_stats->csum_none;
+ s->tx_csum_none += sq_stats->csum_none;
+ s->tx_csum_partial += sq_stats->csum_partial;
}
}
- /* Update calculated offload counters */
- s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
- s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
-
s->link_down_events_phy = MLX5_GET(ppcnt_reg,
priv->stats.pport.phy_counters,
counter_set.phys_layer_cntrs.link_down_events);
@@ -3333,8 +3330,8 @@ static int mlx5e_handle_feature(struct net_device *netdev,
err = feature_handler(netdev, enable);
if (err) {
- netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
- enable ? "Enable" : "Disable", feature, err);
+ netdev_err(netdev, "%s feature %pNF failed, err %d\n",
+ enable ? "Enable" : "Disable", &feature, err);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index f1dd638384d3..15a1687483cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -627,6 +627,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
if (lro) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ rq->stats.csum_unnecessary++;
return;
}
@@ -644,7 +645,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
skb->csum_level = 1;
skb->encapsulation = 1;
rq->stats.csum_unnecessary_inner++;
+ return;
}
+ rq->stats.csum_unnecessary++;
return;
}
csum_none:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 6d199ffb1c0b..f8637213afc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -68,6 +68,7 @@ struct mlx5e_sw_stats {
u64 rx_xdp_drop;
u64 rx_xdp_tx;
u64 rx_xdp_tx_full;
+ u64 tx_csum_none;
u64 tx_csum_partial;
u64 tx_csum_partial_inner;
u64 tx_queue_stopped;
@@ -108,6 +109,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
@@ -339,6 +341,7 @@ struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
u64 csum_complete;
+ u64 csum_unnecessary;
u64 csum_unnecessary_inner;
u64 csum_none;
u64 lro_packets;
@@ -363,6 +366,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
@@ -392,6 +396,7 @@ struct mlx5e_sq_stats {
u64 tso_bytes;
u64 tso_inner_packets;
u64 tso_inner_bytes;
+ u64 csum_partial;
u64 csum_partial_inner;
u64 nop;
/* less likely accessed in data path */
@@ -408,6 +413,7 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index da503e6411da..1aa2028ed995 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1317,6 +1317,69 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda
return true;
}
+static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
+ struct tcf_exts *exts)
+{
+ const struct tc_action *a;
+ bool modify_ip_header;
+ LIST_HEAD(actions);
+ u8 htype, ip_proto;
+ void *headers_v;
+ u16 ethertype;
+ int nkeys, i;
+
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+ ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
+
+ /* for non-IP we only re-write MACs, so we're okay */
+ if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
+ goto out_ok;
+
+ modify_ip_header = false;
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ if (!is_tcf_pedit(a))
+ continue;
+
+ nkeys = tcf_pedit_nkeys(a);
+ for (i = 0; i < nkeys; i++) {
+ htype = tcf_pedit_htype(a, i);
+ if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
+ htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
+ modify_ip_header = true;
+ break;
+ }
+ }
+ }
+
+ ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
+ if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ pr_info("can't offload re-write of ip proto %d\n", ip_proto);
+ return false;
+ }
+
+out_ok:
+ return true;
+}
+
+static bool actions_match_supported(struct mlx5e_priv *priv,
+ struct tcf_exts *exts,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow)
+{
+ u32 actions;
+
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ actions = flow->esw_attr->action;
+ else
+ actions = flow->nic_attr->action;
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ return modify_header_match_supported(&parse_attr->spec, exts);
+
+ return true;
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
@@ -1378,6 +1441,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EINVAL;
}
+ if (!actions_match_supported(priv, exts, parse_attr, flow))
+ return -EOPNOTSUPP;
+
return 0;
}
@@ -1564,7 +1630,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
break;
default:
err = -EOPNOTSUPP;
- goto out;
+ goto free_encap;
}
fl4.flowi4_tos = tun_key->tos;
fl4.daddr = tun_key->u.ipv4.dst;
@@ -1573,7 +1639,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
&fl4, &n, &ttl);
if (err)
- goto out;
+ goto free_encap;
/* used by mlx5e_detach_encap to lookup a neigh hash table
* entry in the neigh hash table when a user deletes a rule
@@ -1590,7 +1656,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
*/
err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
if (err)
- goto out;
+ goto free_encap;
read_lock_bh(&n->lock);
nud_state = n->nud_state;
@@ -1630,8 +1696,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
destroy_neigh_entry:
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-out:
+free_encap:
kfree(encap_header);
+out:
if (n)
neigh_release(n);
return err;
@@ -1668,7 +1735,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
break;
default:
err = -EOPNOTSUPP;
- goto out;
+ goto free_encap;
}
fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
@@ -1678,7 +1745,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
&fl6, &n, &ttl);
if (err)
- goto out;
+ goto free_encap;
/* used by mlx5e_detach_encap to lookup a neigh hash table
* entry in the neigh hash table when a user deletes a rule
@@ -1695,7 +1762,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
*/
err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
if (err)
- goto out;
+ goto free_encap;
read_lock_bh(&n->lock);
nud_state = n->nud_state;
@@ -1736,8 +1803,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
destroy_neigh_entry:
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-out:
+free_encap:
kfree(encap_header);
+out:
if (n)
neigh_release(n);
return err;
@@ -1791,6 +1859,7 @@ vxlan_encap_offload_err:
}
}
+ /* must verify whether the cached encap entry is still valid */
if (found)
goto attach_flow;
@@ -1817,6 +1886,8 @@ attach_flow:
*encap_dev = e->out_dev;
if (e->flags & MLX5_ENCAP_ENTRY_VALID)
attr->encap_id = e->encap_id;
+ else
+ err = -EAGAIN;
return err;
@@ -1934,6 +2005,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EINVAL;
}
+
+ if (!actions_match_supported(priv, exts, parse_attr, flow))
+ return -EOPNOTSUPP;
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index fee43e40fa16..1d6925d4369a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -193,6 +193,7 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
sq->stats.csum_partial_inner++;
} else {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+ sq->stats.csum_partial++;
}
} else
sq->stats.csum_none++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
index e37453d838db..c0fd2212e890 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
@@ -71,11 +71,11 @@ int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
return 0;
}
-int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps)
+int mlx5_fpga_caps(struct mlx5_core_dev *dev)
{
u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0};
- return mlx5_core_access_reg(dev, in, sizeof(in), caps,
+ return mlx5_core_access_reg(dev, in, sizeof(in), dev->caps.fpga,
MLX5_ST_SZ_BYTES(fpga_cap),
MLX5_REG_FPGA_CAP, 0, 0);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
index 94bdfd47c3f0..d05233c9b4f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
@@ -65,7 +65,7 @@ struct mlx5_fpga_qp_counters {
u64 rx_total_drop;
};
-int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps);
+int mlx5_fpga_caps(struct mlx5_core_dev *dev);
int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query);
int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op);
int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index 9034e9960a76..dc8970346521 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -139,8 +139,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
if (err)
goto out;
- err = mlx5_fpga_caps(fdev->mdev,
- fdev->mdev->caps.hca_cur[MLX5_CAP_FPGA]);
+ err = mlx5_fpga_caps(fdev->mdev);
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index e0d0efd903bc..36ecc2b2e187 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -293,6 +293,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
}
if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
+ log_max_flow_counter,
+ ft->type));
int list_size = 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -305,12 +308,17 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
list_size++;
}
+ if (list_size > max_list_size) {
+ err = -EINVAL;
+ goto err_out;
+ }
MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
list_size);
}
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+err_out:
kvfree(in);
return err;
}
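The mlx5_cmd_set_fte() hunk above rejects rules whose flow-counter destination list exceeds the device limit, which firmware reports as a log2 value (log_max_flow_counter). A small standalone C sketch of the same check, with purely illustrative numbers rather than the mlx5 capability API:

#include <stdio.h>

/* The device advertises its limit as log2(max); BIT(cap) in the kernel. */
static unsigned int max_from_log2_cap(unsigned int log_max_flow_counter)
{
        return 1u << log_max_flow_counter;      /* e.g. 4 -> 16 counters */
}

int main(void)
{
        unsigned int log_cap = 4;               /* assumed capability value */
        unsigned int list_size = 20;            /* counters on one rule */

        if (list_size > max_from_log2_cap(log_cap)) {
                fprintf(stderr, "too many flow counters: %u > %u\n",
                        list_size, max_from_log2_cap(log_cap));
                return 1;                       /* the driver returns -EINVAL */
        }
        return 0;
}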
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 5509a752f98e..48dd78975062 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -52,6 +52,7 @@ enum fs_flow_table_type {
FS_FT_FDB = 0X4,
FS_FT_SNIFFER_RX = 0X5,
FS_FT_SNIFFER_TX = 0X6,
+ FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
};
enum fs_flow_table_op_mod {
@@ -260,4 +261,14 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
#define fs_for_each_dst(pos, fte) \
fs_list_for_each_entry(pos, &(fte)->node.children)
+#define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) ( \
+ (type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) : \
+ (type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) : \
+ (type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) : \
+ (type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
+ (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \
+ (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\
+ )
+
#endif
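The MLX5_CAP_FLOWTABLE_TYPE() macro ends its dispatch chain with BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE), so adding a new flow-table type without extending the macro breaks the build instead of silently reading the wrong capability. A minimal sketch of the same compile-time guard, with a local copy of the macro (mirroring the kernel's GCC-specific definition) and illustrative enum names:

/* Evaluates to 0, or fails to compile when 'e' is non-zero (negative bitfield width). */
#define BUILD_BUG_ON_ZERO(e) ((int)sizeof(struct { int:(-!!(e)); }))

enum color { RED, GREEN, BLUE, COLOR_MAX = BLUE };

/* Forgetting to extend this chain after adding a new enum value
 * (and bumping COLOR_MAX) becomes a build error, not a runtime bug. */
#define COLOR_NAME(c) ( \
        ((c) == RED)   ? "red"   : \
        ((c) == GREEN) ? "green" : \
        ((c) == BLUE)  ? "blue"  : \
        (BUILD_BUG_ON_ZERO(BLUE != COLOR_MAX), "unknown"))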
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 85298051a3e4..145e392ab849 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -572,12 +572,13 @@ void mlx5_rdma_netdev_free(struct net_device *netdev)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
const struct mlx5e_profile *profile = priv->profile;
+ struct mlx5_core_dev *mdev = priv->mdev;
mlx5e_detach_netdev(priv);
profile->cleanup(priv);
destroy_workqueue(priv->wq);
free_netdev(netdev);
- mlx5e_destroy_mdev_resources(priv->mdev);
+ mlx5e_destroy_mdev_resources(mdev);
}
EXPORT_SYMBOL(mlx5_rdma_netdev_free);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 6c48e9959b65..2a8b529ce6dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -109,7 +109,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
mlx5_core_warn(dev,
"failed to restore VF %d settings, err %d\n",
vf, err);
- continue;
+ continue;
}
}
mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 9d5e7cf288be..f3315bc874ad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -96,6 +96,7 @@ struct mlxsw_core {
const struct mlxsw_bus *bus;
void *bus_priv;
const struct mlxsw_bus_info *bus_info;
+ struct workqueue_struct *emad_wq;
struct list_head rx_listener_list;
struct list_head event_listener_list;
struct {
@@ -465,7 +466,7 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
- mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
+ queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
}
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
@@ -587,12 +588,18 @@ static const struct mlxsw_listener mlxsw_emad_rx_listener =
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
+ struct workqueue_struct *emad_wq;
u64 tid;
int err;
if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return 0;
+ emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+ if (!emad_wq)
+ return -ENOMEM;
+ mlxsw_core->emad_wq = emad_wq;
+
/* Set the upper 32 bits of the transaction ID field to a random
* number. This allows us to discard EMADs addressed to other
* devices.
@@ -619,6 +626,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
err_emad_trap_set:
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
+ destroy_workqueue(mlxsw_core->emad_wq);
return err;
}
@@ -631,6 +639,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
mlxsw_core->emad.use_emad = false;
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
+ destroy_workqueue(mlxsw_core->emad_wq);
}
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
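The mlxsw change above moves EMAD timeout work onto a dedicated WQ_MEM_RECLAIM workqueue that is created during init and destroyed on both the error and teardown paths. A minimal sketch of that allocate/queue/destroy pattern (kernel context assumed; all foo_* names are illustrative):

#include <linux/workqueue.h>

struct foo {
        struct workqueue_struct *wq;
        struct delayed_work timeout_dw;
};

static void foo_timeout_work(struct work_struct *work)
{
        struct foo *foo = container_of(to_delayed_work(work),
                                       struct foo, timeout_dw);
        /* handle the expired transaction here */
        (void)foo;
}

static int foo_init(struct foo *foo)
{
        /* WQ_MEM_RECLAIM guarantees forward progress under memory pressure. */
        foo->wq = alloc_workqueue("foo_wq", WQ_MEM_RECLAIM, 0);
        if (!foo->wq)
                return -ENOMEM;
        INIT_DELAYED_WORK(&foo->timeout_dw, foo_timeout_work);
        return 0;
}

static void foo_arm_timeout(struct foo *foo, unsigned long delay)
{
        /* Queue on the private workqueue instead of the shared system one. */
        queue_delayed_work(foo->wq, &foo->timeout_dw, delay);
}

static void foo_fini(struct foo *foo)
{
        cancel_delayed_work_sync(&foo->timeout_dw);
        destroy_workqueue(foo->wq);
}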
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index cc27c5de5a1d..4afc8486eb9a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -6401,6 +6401,36 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
mlxsw_reg_mgpc_opcode_set(payload, opcode);
}
+/* TIGCR - Tunneling IPinIP General Configuration Register
+ * -------------------------------------------------------
+ * The TIGCR register is used for setting up the IPinIP Tunnel configuration.
+ */
+#define MLXSW_REG_TIGCR_ID 0xA801
+#define MLXSW_REG_TIGCR_LEN 0x10
+
+MLXSW_REG_DEFINE(tigcr, MLXSW_REG_TIGCR_ID, MLXSW_REG_TIGCR_LEN);
+
+/* reg_tigcr_ipip_ttlc
+ * For IPinIP Tunnel encapsulation: whether to copy the ttl from the packet
+ * header.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tigcr, ttlc, 0x04, 8, 1);
+
+/* reg_tigcr_ipip_ttl_uc
+ * The TTL for IPinIP Tunnel encapsulation of unicast packets if
+ * reg_tigcr_ipip_ttlc is unset.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tigcr, ttl_uc, 0x04, 0, 8);
+
+static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc)
+{
+ MLXSW_REG_ZERO(tigcr, payload);
+ mlxsw_reg_tigcr_ttlc_set(payload, ttlc);
+ mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc);
+}
+
/* SBPR - Shared Buffer Pools Register
* -----------------------------------
* The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -6881,6 +6911,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mcc),
MLXSW_REG(mcda),
MLXSW_REG(mgpc),
+ MLXSW_REG(tigcr),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
MLXSW_REG(sbpm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 2cfb3f5d092d..5189022a1c8c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2723,6 +2723,7 @@ static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_rif_fini(nh);
break;
case MLXSW_SP_NEXTHOP_TYPE_IPIP:
+ mlxsw_sp_nexthop_rif_fini(nh);
mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
break;
}
@@ -2742,7 +2743,11 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
MLXSW_SP_L3_PROTO_IPV4)) {
nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+ err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+ if (err)
+ return err;
+ mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
+ return 0;
}
nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -3500,20 +3505,6 @@ static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib *fib)
{
- struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
- struct mlxsw_sp_lpm_tree *lpm_tree;
-
- /* Aggregate prefix lengths across all virtual routers to make
- * sure we only have used prefix lengths in the LPM tree.
- */
- mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
- lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
- fib->proto);
- if (IS_ERR(lpm_tree))
- goto err_tree_get;
- mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
-
-err_tree_get:
if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
return;
mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
@@ -4009,7 +4000,11 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
MLXSW_SP_L3_PROTO_IPV6)) {
nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+ err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+ if (err)
+ return err;
+ mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
+ return 0;
}
nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -5068,6 +5063,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
if (IS_ERR(vr))
return ERR_CAST(vr);
+ vr->rif_count++;
err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
if (err)
@@ -5099,7 +5095,6 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rif_counters_alloc(rif);
mlxsw_sp->router->rifs[rif_index] = rif;
- vr->rif_count++;
return rif;
@@ -5110,6 +5105,7 @@ err_fid_get:
kfree(rif);
err_rif_alloc:
err_rif_index_alloc:
+ vr->rif_count--;
mlxsw_sp_vr_put(vr);
return ERR_PTR(err);
}
@@ -5124,7 +5120,6 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
- vr->rif_count--;
mlxsw_sp->router->rifs[rif->rif_index] = NULL;
mlxsw_sp_rif_counters_free(rif);
ops->deconfigure(rif);
@@ -5132,6 +5127,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
/* Loopback RIFs are not associated with a FID. */
mlxsw_sp_fid_put(fid);
kfree(rif);
+ vr->rif_count--;
mlxsw_sp_vr_put(vr);
}
@@ -5900,11 +5896,20 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->router->rifs);
}
+static int
+mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
+{
+ char tigcr_pl[MLXSW_REG_TIGCR_LEN];
+
+ mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
+}
+
static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
- return 0;
+ return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}
static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1c0187f0af51..e118b5f23996 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1180,10 +1180,14 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
void *frag;
- if (!dp->xdp_prog)
+ if (!dp->xdp_prog) {
frag = netdev_alloc_frag(dp->fl_bufsz);
- else
- frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
+ } else {
+ struct page *page;
+
+ page = alloc_page(GFP_KERNEL | __GFP_COLD);
+ frag = page ? page_address(page) : NULL;
+ }
if (!frag) {
nn_dp_warn(dp, "Failed to alloc receive page frag\n");
return NULL;
@@ -1203,10 +1207,14 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
void *frag;
- if (!dp->xdp_prog)
+ if (!dp->xdp_prog) {
frag = napi_alloc_frag(dp->fl_bufsz);
- else
- frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
+ } else {
+ struct page *page;
+
+ page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ frag = page ? page_address(page) : NULL;
+ }
if (!frag) {
nn_dp_warn(dp, "Failed to alloc receive page frag\n");
return NULL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 07969f06df10..dc016dfec64d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -464,7 +464,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
do {
start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
- *data++ = nn->r_vecs[i].rx_pkts;
+ data[0] = nn->r_vecs[i].rx_pkts;
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
@@ -472,14 +472,16 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
do {
start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
- *data++ = nn->r_vecs[i].tx_pkts;
- *data++ = nn->r_vecs[i].tx_busy;
+ data[1] = nn->r_vecs[i].tx_pkts;
+ data[2] = nn->r_vecs[i].tx_busy;
tmp[3] = nn->r_vecs[i].hw_csum_tx;
tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
tmp[5] = nn->r_vecs[i].tx_gather;
tmp[6] = nn->r_vecs[i].tx_lso;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
+ data += 3;
+
for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
gathered_stats[j] += tmp[j];
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
index bbe24639aa5a..c8c6231b87f3 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
@@ -88,6 +88,8 @@ static void emac_set_msglevel(struct net_device *netdev, u32 data)
static int emac_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
+ case ETH_SS_PRIV_FLAGS:
+ return 1;
case ETH_SS_STATS:
return EMAC_STATS_LEN;
default:
@@ -100,6 +102,10 @@ static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
unsigned int i;
switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ strcpy(data, "single-pause-mode");
+ break;
+
case ETH_SS_STATS:
for (i = 0; i < EMAC_STATS_LEN; i++) {
strlcpy(data, emac_ethtool_stat_strings[i],
@@ -230,6 +236,27 @@ static int emac_get_regs_len(struct net_device *netdev)
return EMAC_MAX_REG_SIZE * sizeof(u32);
}
+#define EMAC_PRIV_ENABLE_SINGLE_PAUSE BIT(0)
+
+static int emac_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ adpt->single_pause_mode = !!(flags & EMAC_PRIV_ENABLE_SINGLE_PAUSE);
+
+ if (netif_running(netdev))
+ return emac_reinit_locked(adpt);
+
+ return 0;
+}
+
+static u32 emac_get_priv_flags(struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ return adpt->single_pause_mode ? EMAC_PRIV_ENABLE_SINGLE_PAUSE : 0;
+}
+
static const struct ethtool_ops emac_ethtool_ops = {
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
@@ -253,6 +280,9 @@ static const struct ethtool_ops emac_ethtool_ops = {
.get_regs_len = emac_get_regs_len,
.get_regs = emac_get_regs,
+
+ .set_priv_flags = emac_set_priv_flags,
+ .get_priv_flags = emac_get_priv_flags,
};
void emac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index bcd4708b3745..3ed9033e56db 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -551,6 +551,28 @@ static void emac_mac_start(struct emac_adapter *adpt)
mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL |
DEBUG_MODE | SINGLE_PAUSE_MODE);
+ /* Enable single-pause-frame mode if requested.
+ *
+ * If enabled, the EMAC will send a single pause frame when the RX
+ * queue is full. This normally leads to packet loss because
+ * the pause frame disables the remote MAC only for 33ms (the quanta),
+ * and then the remote MAC continues sending packets even though
+ * the RX queue is still full.
+ *
+ * If disabled, the EMAC sends a pause frame every 31ms until the RX
+ * queue is no longer full. Normally, this is the preferred
+ * method of operation. However, when the system is hung (e.g.
+ * cores are halted), the EMAC interrupt handler is never called
+ * and so the RX queue fills up quickly and stays full. The resulting
+ * non-stop "flood" of pause frames sometimes has the effect of
+ * disabling nearby switches. In some cases, other nearby switches
+ * are also affected, shutting down the entire network.
+ *
+ * The user can enable or disable single-pause-frame mode
+ * via ethtool.
+ */
+ mac |= adpt->single_pause_mode ? SINGLE_PAUSE_MODE : 0;
+
writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1);
writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL);
@@ -876,7 +898,8 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
curr_rxbuf->dma_addr =
dma_map_single(adpt->netdev->dev.parent, skb->data,
- curr_rxbuf->length, DMA_FROM_DEVICE);
+ adpt->rxbuf_size, DMA_FROM_DEVICE);
+
ret = dma_mapping_error(adpt->netdev->dev.parent,
curr_rxbuf->dma_addr);
if (ret) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 60850bfa3d32..759543512117 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -443,6 +443,9 @@ static void emac_init_adapter(struct emac_adapter *adpt)
/* default to automatic flow control */
adpt->automatic = true;
+
+ /* Disable single-pause-frame mode by default */
+ adpt->single_pause_mode = false;
}
/* Get the clock */
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h
index 8ee4ec6aef2e..d7c9f44209d4 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac.h
@@ -363,6 +363,9 @@ struct emac_adapter {
bool tx_flow_control;
bool rx_flow_control;
+ /* True == use single-pause-frame mode. */
+ bool single_pause_mode;
+
/* Ring parameter */
u8 tpd_burst;
u8 rfd_burst;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 98f22551eb45..1e33aea59f50 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -51,10 +51,7 @@ struct rmnet_walk_data {
static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
- rx_handler_func_t *rx_handler;
-
- rx_handler = rcu_dereference(real_dev->rx_handler);
- return (rx_handler == rmnet_rx_handler);
+ return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
}
/* Needs rtnl lock */
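rmnet_is_real_dev_registered() only compares the RCU-protected rx_handler pointer against a known function and never dereferences it, so rcu_access_pointer() is sufficient and, unlike rcu_dereference(), does not require an RCU read-side critical section. A small sketch of the distinction (kernel context assumed; the widget structure is illustrative):

#include <linux/rcupdate.h>

struct widget {
        int value;
};

struct widget __rcu *global_widget;

/* Comparison only: no dereference, no rcu_read_lock() needed. */
static bool widget_is(const struct widget *candidate)
{
        return rcu_access_pointer(global_widget) == candidate;
}

/* Actual dereference: must run under rcu_read_lock(). */
static int widget_value(void)
{
        struct widget *w;
        int value = -1;

        rcu_read_lock();
        w = rcu_dereference(global_widget);
        if (w)
                value = w->value;
        rcu_read_unlock();

        return value;
}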
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index ca22f2898664..d24b47b8e0b2 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2135,11 +2135,12 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
if (likely(RTL_R16(IntrStatus) & RxAckBits))
work_done += rtl8139_rx(dev, tp, budget);
- if (work_done < budget && napi_complete_done(napi, work_done)) {
+ if (work_done < budget) {
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
- RTL_W16_F(IntrMask, rtl8139_intr_mask);
+ if (napi_complete_done(napi, work_done))
+ RTL_W16_F(IntrMask, rtl8139_intr_mask);
spin_unlock_irqrestore(&tp->lock, flags);
}
spin_unlock(&tp->rx_lock);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e03fcf914690..a3c949ea7d1a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8491,8 +8491,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl8168_driver_start(tp);
}
- device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
-
if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);
diff --git a/drivers/net/ethernet/rocker/rocker_tlv.h b/drivers/net/ethernet/rocker/rocker_tlv.h
index a63ef82e7c72..dfae3c9d57c6 100644
--- a/drivers/net/ethernet/rocker/rocker_tlv.h
+++ b/drivers/net/ethernet/rocker/rocker_tlv.h
@@ -139,40 +139,52 @@ rocker_tlv_start(struct rocker_desc_info *desc_info)
int rocker_tlv_put(struct rocker_desc_info *desc_info,
int attrtype, int attrlen, const void *data);
-static inline int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
- int attrtype, u8 value)
+static inline int
+rocker_tlv_put_u8(struct rocker_desc_info *desc_info, int attrtype, u8 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
+ u8 tmp = value; /* work around GCC PR81715 */
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &tmp);
}
-static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
- int attrtype, u16 value)
+static inline int
+rocker_tlv_put_u16(struct rocker_desc_info *desc_info, int attrtype, u16 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
+ u16 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &tmp);
}
-static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
- int attrtype, __be16 value)
+static inline int
+rocker_tlv_put_be16(struct rocker_desc_info *desc_info, int attrtype, __be16 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
+ __be16 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &tmp);
}
-static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
- int attrtype, u32 value)
+static inline int
+rocker_tlv_put_u32(struct rocker_desc_info *desc_info, int attrtype, u32 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
+ u32 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &tmp);
}
-static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
- int attrtype, __be32 value)
+static inline int
+rocker_tlv_put_be32(struct rocker_desc_info *desc_info, int attrtype, __be32 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
+ __be32 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &tmp);
}
-static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
- int attrtype, u64 value)
+static inline int
+rocker_tlv_put_u64(struct rocker_desc_info *desc_info, int attrtype, u64 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
+ u64 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &tmp);
}
static inline struct rocker_tlv *
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index dd6a2f9791cc..5efef8001edf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -511,6 +511,7 @@ static struct platform_driver dwc_eth_dwmac_driver = {
.remove = dwc_eth_dwmac_remove,
.driver = {
.name = "dwc-eth-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = dwc_eth_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 99823f54696a..13133b30b575 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -83,6 +83,117 @@ struct rk_priv_data {
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+#define RK3128_GRF_MAC_CON0 0x0168
+#define RK3128_GRF_MAC_CON1 0x016c
+
+/* RK3128_GRF_MAC_CON0 */
+#define RK3128_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
+#define RK3128_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+#define RK3128_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3128_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3128_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
+#define RK3128_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+/* RK3128_GRF_MAC_CON1 */
+#define RK3128_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
+#define RK3128_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
+#define RK3128_GMAC_FLOW_CTRL GRF_BIT(9)
+#define RK3128_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
+#define RK3128_GMAC_SPEED_10M GRF_CLR_BIT(10)
+#define RK3128_GMAC_SPEED_100M GRF_BIT(10)
+#define RK3128_GMAC_RMII_CLK_25M GRF_BIT(11)
+#define RK3128_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
+#define RK3128_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
+#define RK3128_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_RMII_MODE GRF_BIT(14)
+#define RK3128_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
+
+static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_PHY_INTF_SEL_RGMII |
+ RK3128_GMAC_RMII_MODE_CLR);
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
+ DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
+ RK3128_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3128_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
+}
+
+static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_RMII_CLK_2_5M |
+ RK3128_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_RMII_CLK_25M |
+ RK3128_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops rk3128_ops = {
+ .set_to_rgmii = rk3128_set_to_rgmii,
+ .set_to_rmii = rk3128_set_to_rmii,
+ .set_rgmii_speed = rk3128_set_rgmii_speed,
+ .set_rmii_speed = rk3128_set_rmii_speed,
+};
+
#define RK3228_GRF_MAC_CON0 0x0900
#define RK3228_GRF_MAC_CON1 0x0904
@@ -1313,6 +1424,7 @@ static int rk_gmac_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
static const struct of_device_id rk_gmac_dwmac_match[] = {
+ { .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
{ .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
{ .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index c4407e8e39a3..2f7d7ec59962 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -296,6 +296,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
void __iomem *ioaddr = hw->pcsr;
unsigned int pmt = 0;
+ u32 config;
if (mode & WAKE_MAGIC) {
pr_debug("GMAC: WOL Magic frame\n");
@@ -306,6 +307,12 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
pmt |= power_down | global_unicast | wake_up_frame_en;
}
+ if (pmt) {
+ /* The receiver must be enabled for WOL before powering down */
+ config = readl(ioaddr + GMAC_CONFIG);
+ config |= GMAC_CONFIG_RE;
+ writel(config, ioaddr + GMAC_CONFIG);
+ }
writel(pmt, ioaddr + GMAC_PMT);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index e0ef02f9503b..4b286e27c4ca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -275,7 +275,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
goto exit;
i++;
- } while ((ret == 1) || (i < 10));
+ } while ((ret == 1) && (i < 10));
if (i == 10)
ret = -EBUSY;
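The dwmac4 descriptor fix above turns the loop condition from '||' into '&&': with '||' the loop can never hit the ten-iteration bound while the hardware keeps returning 1, so the -EBUSY timeout was unreachable. A trivial sketch of the corrected bounded-retry shape (poll_hw_status() is a hypothetical stand-in for the descriptor read):

extern int poll_hw_status(void);        /* hypothetical: returns 1 while busy, 0 when done */

static int wait_for_hw(void)
{
        int ret, i = 0;

        do {
                ret = poll_hw_status();
                if (ret == 0)
                        return 0;               /* result is ready */
                i++;
        } while (ret == 1 && i < 10);           /* '&&' bounds the retries */

        return -1;                              /* mirrors the driver's -EBUSY */
}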
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 67af0bdd7f10..7516ca210855 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -34,7 +34,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
!(value & DMA_BUS_MODE_SFT_RESET),
- 100000, 10000);
+ 10000, 100000);
if (err)
return -EBUSY;
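The dwmac_dma_reset() fix swaps the last two arguments of readl_poll_timeout(), whose signature is readl_poll_timeout(addr, val, cond, sleep_us, timeout_us); the original call slept 100 ms per poll against a 10 ms total budget. A minimal sketch of the intended call (kernel context assumed; the register offset and bit value are illustrative):

#include <linux/iopoll.h>

#define DMA_BUS_MODE            0x00
#define DMA_BUS_MODE_SFT_RESET  0x00000001

static int example_dma_reset(void __iomem *ioaddr)
{
        u32 value;

        /* Poll every 10 ms (sleep_us), give up after 100 ms (timeout_us). */
        return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
                                  !(value & DMA_BUS_MODE_SFT_RESET),
                                  10000, 100000);
}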
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 1763e48c84e2..16bd50929084 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -473,19 +473,18 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
struct dma_desc *np, struct sk_buff *skb)
{
struct skb_shared_hwtstamps *shhwtstamp = NULL;
+ struct dma_desc *desc = p;
u64 ns;
if (!priv->hwts_rx_en)
return;
+ /* For GMAC4, the valid timestamp is from CTX next desc. */
+ if (priv->plat->has_gmac4)
+ desc = np;
/* Check if timestamp is available */
- if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
- /* For GMAC4, the valid timestamp is from CTX next desc. */
- if (priv->plat->has_gmac4)
- ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
- else
- ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
-
+ if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
+ ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
shhwtstamp = skb_hwtstamps(skb);
memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
@@ -1800,12 +1799,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
- unsigned int entry = tx_q->dirty_tx;
+ unsigned int entry;
netif_tx_lock(priv->dev);
priv->xstats.tx_clean++;
+ entry = tx_q->dirty_tx;
while (entry != tx_q->cur_tx) {
struct sk_buff *skb = tx_q->tx_skbuff[entry];
struct dma_desc *p;
@@ -3333,6 +3333,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* them in stmmac_rx_refill() function so that
* device can reuse it.
*/
+ dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
rx_q->rx_skbuff[entry] = NULL;
dma_unmap_single(priv->device,
rx_q->rx_skbuff_dma[entry],
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index a366b3747eeb..8a280b48e3a9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -315,6 +315,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
{ .compatible = "allwinner,sun8i-h3-emac" },
{ .compatible = "allwinner,sun8i-v3s-emac" },
{ .compatible = "allwinner,sun50i-a64-emac" },
+ {},
};
/* If phy-handle property is passed from DT, use it as the PHY */
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index f6404074b7b0..ed51018a813e 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -113,13 +113,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
{
-#ifdef __BIG_ENDIAN
- return (vni[0] == tun_id[2]) &&
- (vni[1] == tun_id[1]) &&
- (vni[2] == tun_id[0]);
-#else
return !memcmp(vni, &tun_id[5], 3);
-#endif
}
static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d98cdfb1536b..5176be76ca7d 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -150,6 +150,8 @@ struct netvsc_device_info {
u32 num_chn;
u32 send_sections;
u32 recv_sections;
+ u32 send_section_size;
+ u32 recv_section_size;
};
enum rndis_device_state {
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index a5511b7326af..8d5077fb0492 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -76,9 +76,6 @@ static struct netvsc_device *alloc_net_device(void)
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
- net_device->recv_section_size = NETVSC_RECV_SECTION_SIZE;
- net_device->send_section_size = NETVSC_SEND_SECTION_SIZE;
-
init_completion(&net_device->channel_init_wait);
init_waitqueue_head(&net_device->subchan_open);
INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
@@ -262,7 +259,7 @@ static int netvsc_init_buf(struct hv_device *device,
int ret = 0;
/* Get receive buffer area. */
- buf_size = device_info->recv_sections * net_device->recv_section_size;
+ buf_size = device_info->recv_sections * device_info->recv_section_size;
buf_size = roundup(buf_size, PAGE_SIZE);
net_device->recv_buf = vzalloc(buf_size);
@@ -344,7 +341,7 @@ static int netvsc_init_buf(struct hv_device *device,
goto cleanup;
/* Now setup the send buffer. */
- buf_size = device_info->send_sections * net_device->send_section_size;
+ buf_size = device_info->send_sections * device_info->send_section_size;
buf_size = round_up(buf_size, PAGE_SIZE);
net_device->send_buf = vzalloc(buf_size);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d4902ee5f260..a32ae02e1b6c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -848,7 +848,9 @@ static int netvsc_set_channels(struct net_device *net,
device_info.num_chn = count;
device_info.ring_size = ring_size;
device_info.send_sections = nvdev->send_section_cnt;
+ device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = nvdev->recv_section_cnt;
+ device_info.recv_section_size = nvdev->recv_section_size;
rndis_filter_device_remove(dev, nvdev);
@@ -963,7 +965,9 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
device_info.ring_size = ring_size;
device_info.num_chn = nvdev->num_chn;
device_info.send_sections = nvdev->send_section_cnt;
+ device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = nvdev->recv_section_cnt;
+ device_info.recv_section_size = nvdev->recv_section_size;
rndis_filter_device_remove(hdev, nvdev);
@@ -1485,7 +1489,9 @@ static int netvsc_set_ringparam(struct net_device *ndev,
device_info.num_chn = nvdev->num_chn;
device_info.ring_size = ring_size;
device_info.send_sections = new_tx;
+ device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = new_rx;
+ device_info.recv_section_size = nvdev->recv_section_size;
netif_device_detach(ndev);
was_opened = rndis_filter_opened(nvdev);
@@ -1934,7 +1940,9 @@ static int netvsc_probe(struct hv_device *dev,
device_info.ring_size = ring_size;
device_info.num_chn = VRSS_CHANNEL_DEFAULT;
device_info.send_sections = NETVSC_DEFAULT_TX;
+ device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
device_info.recv_sections = NETVSC_DEFAULT_RX;
+ device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
nvdev = rndis_filter_device_add(dev, &device_info);
if (IS_ERR(nvdev)) {
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 98e4deaa3a6a..5ab1b8849c30 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -742,6 +742,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
sg_init_table(sg, ret);
ret = skb_to_sgvec(skb, sg, 0, skb->len);
if (unlikely(ret < 0)) {
+ aead_request_free(req);
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(ret);
@@ -954,6 +955,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
sg_init_table(sg, ret);
ret = skb_to_sgvec(skb, sg, 0, skb->len);
if (unlikely(ret < 0)) {
+ aead_request_free(req);
kfree_skb(skb);
return ERR_PTR(ret);
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a9d16a3af514..cd931cf9dcc2 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -160,15 +160,6 @@ config MDIO_XGENE
endif
-menuconfig PHYLIB
- tristate "PHY Device support and infrastructure"
- depends on NETDEVICES
- select MDIO_DEVICE
- help
- Ethernet controllers are usually attached to PHY
- devices. This option provides infrastructure for
- managing PHY devices.
-
config PHYLINK
tristate
depends on NETDEVICES
@@ -179,6 +170,15 @@ config PHYLINK
configuration links, PHYs, and Serdes links with MAC level
autonegotiation modes.
+menuconfig PHYLIB
+ tristate "PHY Device support and infrastructure"
+ depends on NETDEVICES
+ select MDIO_DEVICE
+ help
+ Ethernet controllers are usually attached to PHY
+ devices. This option provides infrastructure for
+ managing PHY devices.
+
if PHYLIB
config SWPHY
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e842d2cd1ee7..2b1e67bc1e73 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -373,7 +373,8 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.port = PORT_BNC;
else
cmd->base.port = PORT_MII;
-
+ cmd->base.transceiver = phy_is_internal(phydev) ?
+ XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->base.phy_address = phydev->mdio.addr;
cmd->base.autoneg = phydev->autoneg;
cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8cf0c5901f95..67f25ac29025 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -879,7 +879,7 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
{
const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
char *irq_str;
- char irq_num[4];
+ char irq_num[8];
switch(phydev->irq) {
case PHY_POLL:
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index d15dd3938ba8..2e5150b0b8d5 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -44,7 +44,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
priv->phy_drv->read_status(phydev);
val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
- val &= XILINX_GMII2RGMII_SPEED_MASK;
+ val &= ~XILINX_GMII2RGMII_SPEED_MASK;
if (phydev->speed == SPEED_1000)
val |= BMCR_SPEED1000;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index a404552555d4..e365866600ba 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -120,7 +120,7 @@ struct ppp {
int n_channels; /* how many channels are attached 54 */
spinlock_t rlock; /* lock for receive side 58 */
spinlock_t wlock; /* lock for transmit side 5c */
- int *xmit_recursion __percpu; /* xmit recursion detect */
+ int __percpu *xmit_recursion; /* xmit recursion detect */
int mru; /* max receive unit 60 */
unsigned int flags; /* control bits 64 */
unsigned int xstate; /* transmit state bits 68 */
@@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
static int ppp_dev_init(struct net_device *dev)
{
+ struct ppp *ppp;
+
netdev_lockdep_set_classes(dev);
+
+ ppp = netdev_priv(dev);
+ /* Let the netdevice take a reference on the ppp file. This ensures
+ * that ppp_destroy_interface() won't run before the device gets
+ * unregistered.
+ */
+ atomic_inc(&ppp->file.refcnt);
+
return 0;
}
@@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev)
wake_up_interruptible(&ppp->file.rwait);
}
+static void ppp_dev_priv_destructor(struct net_device *dev)
+{
+ struct ppp *ppp;
+
+ ppp = netdev_priv(dev);
+ if (atomic_dec_and_test(&ppp->file.refcnt))
+ ppp_destroy_interface(ppp);
+}
+
static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
.ndo_uninit = ppp_dev_uninit,
@@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev)
dev->tx_queue_len = 3;
dev->type = ARPHRD_PPP;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->priv_destructor = ppp_dev_priv_destructor;
netif_keep_dst(dev);
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3c9985f29950..e21bf90b819f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1496,11 +1496,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
switch (tun->flags & TUN_TYPE_MASK) {
case IFF_TUN:
if (tun->flags & IFF_NO_PI) {
- switch (skb->data[0] & 0xf0) {
- case 0x40:
+ u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
+
+ switch (ip_version) {
+ case 4:
pi.proto = htons(ETH_P_IP);
break;
- case 0x60:
+ case 6:
pi.proto = htons(ETH_P_IPV6);
break;
default:
@@ -2025,6 +2027,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!dev)
return -ENOMEM;
+ err = dev_get_valid_name(net, dev, name);
+ if (err)
+ goto err_free_dev;
dev_net_set(dev, net);
dev->rtnl_link_ops = &tun_link_ops;
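tun_get_user() now refuses to index skb->data[0] on a zero-length frame and compares the IP version nibble (4 or 6) rather than the masked byte (0x40/0x60). A trivial standalone sketch of the guarded extraction:

#include <stdint.h>
#include <stddef.h>

/* Returns 4, 6, or 0 when the buffer is empty or not recognisable. */
static unsigned int ip_version(const uint8_t *data, size_t len)
{
        unsigned int version;

        if (len == 0)
                return 0;               /* never read past an empty frame */

        version = data[0] >> 4;         /* high nibble of the first octet */
        return (version == 4 || version == 6) ? version : 0;
}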
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 8ab281b478f2..52ea80bcd639 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -54,11 +54,19 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc)
desc->bInterfaceProtocol == 3);
}
+static int is_novatel_rndis(struct usb_interface_descriptor *desc)
+{
+ return (desc->bInterfaceClass == USB_CLASS_MISC &&
+ desc->bInterfaceSubClass == 4 &&
+ desc->bInterfaceProtocol == 1);
+}
+
#else
#define is_rndis(desc) 0
#define is_activesync(desc) 0
#define is_wireless_rndis(desc) 0
+#define is_novatel_rndis(desc) 0
#endif
@@ -150,7 +158,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
*/
rndis = (is_rndis(&intf->cur_altsetting->desc) ||
is_activesync(&intf->cur_altsetting->desc) ||
- is_wireless_rndis(&intf->cur_altsetting->desc));
+ is_wireless_rndis(&intf->cur_altsetting->desc) ||
+ is_novatel_rndis(&intf->cur_altsetting->desc));
memset(info, 0, sizeof(*info));
info->control = intf;
@@ -547,9 +556,11 @@ static const struct driver_info wwan_info = {
#define REALTEK_VENDOR_ID 0x0bda
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
+#define LINKSYS_VENDOR_ID 0x13b1
#define NVIDIA_VENDOR_ID 0x0955
#define HP_VENDOR_ID 0x03f0
#define MICROSOFT_VENDOR_ID 0x045e
+#define UBLOX_VENDOR_ID 0x1546
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -737,6 +748,15 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+#if IS_ENABLED(CONFIG_USB_RTL8152)
+/* Linksys USB3GIGV1 Ethernet Adapter */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+#endif
+
/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
@@ -850,6 +870,18 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&zte_cdc_info,
}, {
+ /* U-blox TOBY-L2 */
+ USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1143, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
+ /* U-blox SARA-U2 */
+ USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1104, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index b99a7fb09f8e..0161f77641fa 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1265,30 +1265,45 @@ static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct lan78xx_net *dev = netdev_priv(netdev);
+ int ret;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret)
+ return ret;
ee->magic = LAN78XX_EEPROM_MAGIC;
- return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
+ ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
+
+ usb_autopm_put_interface(dev->intf);
+
+ return ret;
}
static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct lan78xx_net *dev = netdev_priv(netdev);
+ int ret;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret)
+ return ret;
- /* Allow entire eeprom update only */
- if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
- (ee->offset == 0) &&
- (ee->len == 512) &&
- (data[0] == EEPROM_INDICATOR))
- return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+ /* An invalid EEPROM_INDICATOR at offset zero will result in a failure
+ * to load data from the EEPROM
+ */
+ if (ee->magic == LAN78XX_EEPROM_MAGIC)
+ ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
(ee->offset == 0) &&
(ee->len == 512) &&
(data[0] == OTP_INDICATOR_1))
- return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
+ ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
+
+ usb_autopm_put_interface(dev->intf);
- return -EINVAL;
+ return ret;
}
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
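The lan78xx ethtool EEPROM paths now bracket register access with usb_autopm_get_interface()/usb_autopm_put_interface(), so the device is resumed before the read or write and may autosuspend again afterwards. A minimal sketch of the bracket (kernel USB context assumed; foo_do_register_reads() is a hypothetical helper):

#include <linux/usb.h>

int foo_do_register_reads(struct usb_interface *intf, void *buf, size_t len); /* hypothetical */

static int foo_read_eeprom(struct usb_interface *intf, void *buf, size_t len)
{
        int ret;

        /* Resume the device (or fail) before touching its registers. */
        ret = usb_autopm_get_interface(intf);
        if (ret)
                return ret;

        ret = foo_do_register_reads(intf, buf, len);

        /* Drop the PM reference so runtime suspend can happen again. */
        usb_autopm_put_interface(intf);

        return ret;
}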
@@ -2434,7 +2449,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
/* LAN7801 only has RGMII mode */
if (dev->chipid == ID_REV_CHIP_ID_7801_)
buf &= ~MAC_CR_GMII_EN_;
- buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
ret = lan78xx_write_reg(dev, MAC_CR, buf);
ret = lan78xx_read_reg(dev, MAC_TX, &buf);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ceb78e2ea4f0..941ece08ba78 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -613,6 +613,7 @@ enum rtl8152_flags {
#define VENDOR_ID_MICROSOFT 0x045e
#define VENDOR_ID_SAMSUNG 0x04e8
#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_LINKSYS 0x13b1
#define VENDOR_ID_NVIDIA 0x0955
#define MCU_TYPE_PLA 0x0100
@@ -5316,6 +5317,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{}
};
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index a151f267aebb..b807c91abe1d 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -632,6 +632,10 @@ static const struct usb_device_id products [] = {
/* RNDIS for tethering */
USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
.driver_info = (unsigned long) &rndis_info,
+}, {
+ /* Novatel Verizon USB730L */
+ USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1),
+ .driver_info = (unsigned long) &rndis_info,
},
{ }, // END
};
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index c9c711dcd0e6..a89b5685e68b 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -652,7 +652,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
struct device *dev = i2400m_dev(i2400m);
struct {
struct i2400m_bootrom_header cmd;
- u8 cmd_payload[chunk_len];
+ u8 cmd_payload[];
} __packed *buf;
struct i2400m_bootrom_header ack;
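The i2400m change replaces a variable-length array inside an on-stack struct with a C99 flexible array member, sizing the buffer at allocation time instead. A standalone sketch of the pattern (the header layout is illustrative, not the i2400m boot protocol):

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct boot_cmd {
        uint32_t opcode;
        uint32_t payload_len;
        uint8_t  payload[];             /* flexible array member, no VLA */
};

static struct boot_cmd *boot_cmd_alloc(const void *chunk, size_t chunk_len)
{
        struct boot_cmd *cmd;

        /* One allocation covers the header plus the variable payload. */
        cmd = calloc(1, sizeof(*cmd) + chunk_len);
        if (!cmd)
                return NULL;

        cmd->payload_len = (uint32_t)chunk_len;
        memcpy(cmd->payload, chunk, chunk_len);
        return cmd;
}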
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index bc1633945a56..195dafb98131 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3396,9 +3396,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
-#ifdef CONFIG_PM
-
-static int ath10k_pci_pm_suspend(struct device *dev)
+static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
{
struct ath10k *ar = dev_get_drvdata(dev);
int ret;
@@ -3414,7 +3412,7 @@ static int ath10k_pci_pm_suspend(struct device *dev)
return ret;
}
-static int ath10k_pci_pm_resume(struct device *dev)
+static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
{
struct ath10k *ar = dev_get_drvdata(dev);
int ret;
@@ -3433,7 +3431,6 @@ static int ath10k_pci_pm_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
ath10k_pci_pm_suspend,
ath10k_pci_pm_resume);
-#endif
static struct pci_driver ath10k_pci_driver = {
.name = "ath10k_pci",
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index aaed4ab503ad..4157c90ad973 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -980,7 +980,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
eth_broadcast_addr(params_le->bssid);
params_le->bss_type = DOT11_BSSTYPE_ANY;
- params_le->scan_type = 0;
+ params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
params_le->channel_num = 0;
params_le->nprobes = cpu_to_le32(-1);
params_le->active_time = cpu_to_le32(-1);
@@ -988,12 +988,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
params_le->home_time = cpu_to_le32(-1);
memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
- /* if request is null exit so it will be all channel broadcast scan */
- if (!request)
- return;
-
n_ssids = request->n_ssids;
n_channels = request->n_channels;
+
/* Copy channel array if applicable */
brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
n_channels);
@@ -1030,16 +1027,8 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
ptr += sizeof(ssid_le);
}
} else {
- brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids);
- if ((request->ssids) && request->ssids->ssid_len) {
- brcmf_dbg(SCAN, "SSID %s len=%d\n",
- params_le->ssid_le.SSID,
- request->ssids->ssid_len);
- params_le->ssid_le.SSID_len =
- cpu_to_le32(request->ssids->ssid_len);
- memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
- request->ssids->ssid_len);
- }
+ brcmf_dbg(SCAN, "Performing passive scan\n");
+ params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
}
/* Adding mask to channel numbers */
params_le->channel_num =
@@ -3162,6 +3151,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
s32 status;
struct brcmf_escan_result_le *escan_result_le;
+ u32 escan_buflen;
struct brcmf_bss_info_le *bss_info_le;
struct brcmf_bss_info_le *bss = NULL;
u32 bi_length;
@@ -3181,11 +3171,23 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
if (status == BRCMF_E_STATUS_PARTIAL) {
brcmf_dbg(SCAN, "ESCAN Partial result\n");
+ if (e->datalen < sizeof(*escan_result_le)) {
+ brcmf_err("invalid event data length\n");
+ goto exit;
+ }
escan_result_le = (struct brcmf_escan_result_le *) data;
if (!escan_result_le) {
brcmf_err("Invalid escan result (NULL pointer)\n");
goto exit;
}
+ escan_buflen = le32_to_cpu(escan_result_le->buflen);
+ if (escan_buflen > BRCMF_ESCAN_BUF_SIZE ||
+ escan_buflen > e->datalen ||
+ escan_buflen < sizeof(*escan_result_le)) {
+ brcmf_err("Invalid escan buffer length: %d\n",
+ escan_buflen);
+ goto exit;
+ }
if (le16_to_cpu(escan_result_le->bss_count) != 1) {
brcmf_err("Invalid bss_count %d: ignoring\n",
escan_result_le->bss_count);
@@ -3202,9 +3204,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
}
bi_length = le32_to_cpu(bss_info_le->length);
- if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
- WL_ESCAN_RESULTS_FIXED_SIZE)) {
- brcmf_err("Invalid bss_info length %d: ignoring\n",
+ if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) {
+ brcmf_err("Ignoring invalid bss_info length: %d\n",
bi_length);
goto exit;
}
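The escan handler above now validates every nested length before trusting it: the event payload must at least cover the fixed escan result header, and the firmware-supplied buflen must fit both the event payload and the driver's preallocated escan buffer. A condensed standalone sketch of that containment check (structure and constant names are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define ESCAN_BUF_SIZE  65536   /* assumed driver-side buffer limit */

struct escan_result_hdr {
        uint32_t buflen;        /* length claimed by the firmware */
        uint16_t bss_count;
};

/* Accept the nested buffer only if it fits every enclosing bound. */
static bool escan_len_ok(size_t event_datalen, const struct escan_result_hdr *hdr)
{
        if (event_datalen < sizeof(*hdr))
                return false;           /* event too short for the header */
        if (hdr->buflen < sizeof(*hdr))
                return false;           /* claimed length smaller than header */
        if (hdr->buflen > event_datalen)
                return false;           /* claims more than the event carries */
        if (hdr->buflen > ESCAN_BUF_SIZE)
                return false;           /* exceeds the driver's own buffer */
        return true;
}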
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 4eb1e1ce9ace..ef72baf6dd96 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -429,7 +429,8 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
if (code != BRCMF_E_IF && !fweh->evt_handler[code])
return;
- if (datalen > BRCMF_DCMD_MAXLEN)
+ if (datalen > BRCMF_DCMD_MAXLEN ||
+ datalen + sizeof(*event_packet) > packet_len)
return;
if (in_interrupt())
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 8391989b1882..e0d22fedb2b4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -45,6 +45,11 @@
#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
+/* scan type definitions */
+#define BRCMF_SCANTYPE_DEFAULT 0xFF
+#define BRCMF_SCANTYPE_ACTIVE 0
+#define BRCMF_SCANTYPE_PASSIVE 1
+
#define BRCMF_WSEC_MAX_PSK_LEN 32
#define BRCMF_WSEC_PASSPHRASE BIT(0)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index b3aab2fe96eb..ef685465f80a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -14764,8 +14764,8 @@ static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
}
static void
-wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys,
- u8 len)
+wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, const u8 *events,
+ const u8 *dlys, u8 len)
{
u32 t1_offset, t2_offset;
u8 ctr;
@@ -15240,16 +15240,16 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
{
u16 currband;
- s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
- s8 *lna1_gain_db = NULL;
- s8 *lna1_gain_db_2 = NULL;
- s8 *lna2_gain_db = NULL;
- s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
- s8 *tia_gain_db;
- s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
- s8 *tia_gainbits;
- u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
- u16 *rfseq_init_gain;
+ static const s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
+ const s8 *lna1_gain_db = NULL;
+ const s8 *lna1_gain_db_2 = NULL;
+ const s8 *lna2_gain_db = NULL;
+ static const s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
+ const s8 *tia_gain_db;
+ static const s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
+ const s8 *tia_gainbits;
+ static const u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
+ const u16 *rfseq_init_gain;
u16 init_gaincode;
u16 clip1hi_gaincode;
u16 clip1md_gaincode = 0;
@@ -15310,10 +15310,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
if ((freq <= 5080) || (freq == 5825)) {
- s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
- s8 lna1A_gain_db_2_rev7[] = {
- 11, 17, 22, 25};
- s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+ static const s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
+ static const s8 lna1A_gain_db_2_rev7[] = { 11, 17, 22, 25};
+ static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
crsminu_th = 0x3e;
lna1_gain_db = lna1A_gain_db_rev7;
@@ -15321,10 +15320,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
lna2_gain_db = lna2A_gain_db_rev7;
} else if ((freq >= 5500) && (freq <= 5700)) {
- s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
- s8 lna1A_gain_db_2_rev7[] = {
- 12, 18, 22, 26};
- s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
+ static const s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
+ static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+ static const s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
crsminu_th = 0x45;
clip1md_gaincode_B = 0x14;
@@ -15335,10 +15333,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
lna2_gain_db = lna2A_gain_db_rev7;
} else {
- s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
- s8 lna1A_gain_db_2_rev7[] = {
- 12, 18, 22, 26};
- s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+ static const s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
+ static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+ static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
crsminu_th = 0x41;
lna1_gain_db = lna1A_gain_db_rev7;
@@ -15450,65 +15447,65 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
NPHY_RFSEQ_CMD_SET_HPF_BW
};
- u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
- s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
- s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
- s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
- s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
- s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
- s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
- s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
- s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
- s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
- s8 *lna1_gain_db = NULL;
- s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
- s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
- s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
- s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
- s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
- s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
- s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
- s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
- s8 *lna2_gain_db = NULL;
- s8 tiaG_gain_db[] = {
+ static const u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
+ static const s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
+ static const s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
+ static const s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
+ static const s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
+ static const s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
+ static const s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
+ static const s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
+ static const s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
+ static const s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
+ const s8 *lna1_gain_db = NULL;
+ static const s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
+ static const s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
+ static const s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
+ static const s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
+ static const s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
+ static const s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
+ static const s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
+ static const s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
+ const s8 *lna2_gain_db = NULL;
+ static const s8 tiaG_gain_db[] = {
0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
- s8 tiaA_gain_db[] = {
+ static const s8 tiaA_gain_db[] = {
0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
- s8 tiaA_gain_db_rev4[] = {
+ static const s8 tiaA_gain_db_rev4[] = {
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 tiaA_gain_db_rev5[] = {
+ static const s8 tiaA_gain_db_rev5[] = {
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 tiaA_gain_db_rev6[] = {
+ static const s8 tiaA_gain_db_rev6[] = {
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 *tia_gain_db;
- s8 tiaG_gainbits[] = {
+ const s8 *tia_gain_db;
+ static const s8 tiaG_gainbits[] = {
0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
- s8 tiaA_gainbits[] = {
+ static const s8 tiaA_gainbits[] = {
0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
- s8 tiaA_gainbits_rev4[] = {
+ static const s8 tiaA_gainbits_rev4[] = {
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 tiaA_gainbits_rev5[] = {
+ static const s8 tiaA_gainbits_rev5[] = {
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 tiaA_gainbits_rev6[] = {
+ static const s8 tiaA_gainbits_rev6[] = {
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 *tia_gainbits;
- s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
- s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
- u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
- u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
- u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
- u16 rfseqG_init_gain_rev5_elna[] = {
+ const s8 *tia_gainbits;
+ static const s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
+ static const s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
+ static const u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
+ static const u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
+ static const u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
+ static const u16 rfseqG_init_gain_rev5_elna[] = {
0x013f, 0x013f, 0x013f, 0x013f };
- u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
- u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
- u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
- u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
- u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
- u16 rfseqA_init_gain_rev4_elna[] = {
+ static const u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
+ static const u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
+ static const u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
+ static const u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
+ static const u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
+ static const u16 rfseqA_init_gain_rev4_elna[] = {
0x314f, 0x314f, 0x314f, 0x314f };
- u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
- u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
- u16 *rfseq_init_gain;
+ static const u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
+ static const u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
+ const u16 *rfseq_init_gain;
u16 initG_gaincode = 0x627e;
u16 initG_gaincode_rev4 = 0x527e;
u16 initG_gaincode_rev5 = 0x427e;
@@ -15538,10 +15535,10 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
u16 clip1mdA_gaincode_rev6 = 0x2084;
u16 clip1md_gaincode = 0;
u16 clip1loG_gaincode = 0x0074;
- u16 clip1loG_gaincode_rev5[] = {
+ static const u16 clip1loG_gaincode_rev5[] = {
0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
};
- u16 clip1loG_gaincode_rev6[] = {
+ static const u16 clip1loG_gaincode_rev6[] = {
0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
};
u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
@@ -16066,7 +16063,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
{
- u8 rfseq_rx2tx_events[] = {
+ static const u8 rfseq_rx2tx_events[] = {
NPHY_RFSEQ_CMD_NOP,
NPHY_RFSEQ_CMD_RXG_FBW,
NPHY_RFSEQ_CMD_TR_SWITCH,
@@ -16076,7 +16073,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_RFSEQ_CMD_EXT_PA
};
u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
- u8 rfseq_tx2rx_events[] = {
+ static const u8 rfseq_tx2rx_events[] = {
NPHY_RFSEQ_CMD_NOP,
NPHY_RFSEQ_CMD_EXT_PA,
NPHY_RFSEQ_CMD_TX_GAIN,
@@ -16085,8 +16082,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_RFSEQ_CMD_RXG_FBW,
NPHY_RFSEQ_CMD_CLR_HIQ_DIS
};
- u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
- u8 rfseq_tx2rx_events_rev3[] = {
+ static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
+ static const u8 rfseq_tx2rx_events_rev3[] = {
NPHY_REV3_RFSEQ_CMD_EXT_PA,
NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
NPHY_REV3_RFSEQ_CMD_TX_GAIN,
@@ -16096,7 +16093,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
NPHY_REV3_RFSEQ_CMD_END
};
- u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
u8 rfseq_rx2tx_events_rev3[] = {
NPHY_REV3_RFSEQ_CMD_NOP,
NPHY_REV3_RFSEQ_CMD_RXG_FBW,
@@ -16110,7 +16107,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
};
u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
- u8 rfseq_rx2tx_events_rev3_ipa[] = {
+ static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
NPHY_REV3_RFSEQ_CMD_NOP,
NPHY_REV3_RFSEQ_CMD_RXG_FBW,
NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
@@ -16121,15 +16118,15 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
NPHY_REV3_RFSEQ_CMD_END
};
- u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
- u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
+ static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+ static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
s16 alpha0, alpha1, alpha2;
s16 beta0, beta1, beta2;
u32 leg_data_weights, ht_data_weights, nss1_data_weights,
stbc_data_weights;
u8 chan_freq_range = 0;
- u16 dac_control = 0x0002;
+ static const u16 dac_control = 0x0002;
u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 };
u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 };
u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
@@ -16139,8 +16136,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
u16 *aux_adc_gain;
- u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
- u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
+ static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
+ static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
s32 min_nvar_val = 0x18d;
s32 min_nvar_offset_6mbps = 20;
u8 pdetrange;
@@ -16151,9 +16148,9 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77;
u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77;
u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77;
- u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
- u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
- u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+ static const u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
+ static const u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+ static const u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
u16 ipalvlshift_3p3_war_en = 0;
u16 rccal_bcap_val, rccal_scap_val;
u16 rccal_tx20_11b_bcap = 0;
@@ -24291,13 +24288,13 @@ static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
u16 bbmult;
u16 tblentry;
- struct nphy_txiqcal_ladder ladder_lo[] = {
+ static const struct nphy_txiqcal_ladder ladder_lo[] = {
{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
{25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
{25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
};
- struct nphy_txiqcal_ladder ladder_iq[] = {
+ static const struct nphy_txiqcal_ladder ladder_iq[] = {
{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
{25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
{100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
@@ -25773,67 +25770,67 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
u16 cal_gain[2];
struct nphy_iqcal_params cal_params[2];
u32 tbl_len;
- void *tbl_ptr;
+ const void *tbl_ptr;
bool ladder_updated[2];
u8 mphase_cal_lastphase = 0;
int bcmerror = 0;
bool phyhang_avoid_state = false;
- u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+ static const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901,
0x1902,
0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607,
0x6407
};
- u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+ static const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400,
0x3200,
0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406,
0x6407
};
- u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+ static const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201,
0x1202,
0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207,
0x4707
};
- u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+ static const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900,
0x2300,
0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706,
0x4707
};
- u16 tbl_tx_iqlo_cal_startcoefs[] = {
+ static const u16 tbl_tx_iqlo_cal_startcoefs[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000
};
- u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
0x8123, 0x8264, 0x8086, 0x8245, 0x8056,
0x9123, 0x9264, 0x9086, 0x9245, 0x9056
};
- u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
0x8101, 0x8253, 0x8053, 0x8234, 0x8034,
0x9101, 0x9253, 0x9053, 0x9234, 0x9034
};
- u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
+ static const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000
};
- u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234,
0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234
};
- u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223,
0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223
};
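Every brcmsmac hunk above applies the same transformation: constant lookup tables that were plain automatic arrays, rebuilt on the stack on each call, become static const objects in read-only data, and the selector pointers become pointers-to-const, presumably to shrink the rather large stack frames of these functions. A minimal sketch of the pattern with hypothetical names (use_gain() stands in for whatever consumes the table):

/* before: the table is copied onto the stack on every invocation */
static void pick_gain_before(void)
{
	s8 gain_db[] = { 9, 14, 19, 24 };
	s8 *gain = gain_db;

	use_gain(gain);
}

/* after: a single read-only copy, no run-time initialisation */
static void pick_gain_after(void)
{
	static const s8 gain_db[] = { 9, 14, 19, 24 };
	const s8 *gain = gain_db;

	use_gain(gain);
}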
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index 45e2efc70d19..ce741beec1fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -309,6 +309,7 @@ const struct iwl_cfg iwl3168_2ac_cfg = {
.nvm_calib_ver = IWL3168_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
+ .nvm_type = IWL_NVM_SDP,
};
const struct iwl_cfg iwl7265_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 2e6c52664cee..c2a5936ccede 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -164,7 +164,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \
.thermal_params = &iwl8000_tt_params, \
.apmg_not_supported = true, \
- .ext_nvm = true, \
+ .nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true
#define IWL_DEVICE_8000 \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index 2babe0a1f18b..e8b5ff42f5a8 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -148,7 +148,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
.rf_id = true, \
- .ext_nvm = true, \
+ .nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true
const struct iwl_cfg iwl9160_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
index 76ba1f8bc72f..a440140ed8dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
@@ -133,7 +133,7 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
.use_tfh = true, \
.rf_id = true, \
.gen2 = true, \
- .ext_nvm = true, \
+ .nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true
const struct iwl_cfg iwla000_2ac_cfg_hr = {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 00bc7a25dece..3fd07bc80f54 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -108,6 +108,7 @@ enum iwl_nvm_access_target {
* @NVM_SECTION_TYPE_REGULATORY: regulatory section
* @NVM_SECTION_TYPE_CALIBRATION: calibration section
* @NVM_SECTION_TYPE_PRODUCTION: production section
+ * @NVM_SECTION_TYPE_REGULATORY_SDP: regulatory section used by 3168 series
* @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section
* @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section
* @NVM_MAX_NUM_SECTIONS: number of sections
@@ -117,6 +118,7 @@ enum iwl_nvm_section_type {
NVM_SECTION_TYPE_REGULATORY = 3,
NVM_SECTION_TYPE_CALIBRATION = 4,
NVM_SECTION_TYPE_PRODUCTION = 5,
+ NVM_SECTION_TYPE_REGULATORY_SDP = 8,
NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
NVM_SECTION_TYPE_PHY_SKU = 12,
NVM_MAX_NUM_SECTIONS = 13,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 6afc7a799892..f5dd7d83cd0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1086,7 +1086,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
/* stop recording */
- iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+ iwl_fw_dbg_stop_recording(fwrt);
iwl_fw_error_dump(fwrt);
@@ -1104,10 +1104,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);
- /* stop recording */
- iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
- udelay(100);
- iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
+ iwl_fw_dbg_stop_recording(fwrt);
/* wait before we collect the data till the DBGC stop */
udelay(500);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 0f810ea89d31..9c889a32fe24 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -68,6 +68,8 @@
#include <linux/workqueue.h>
#include <net/cfg80211.h>
#include "runtime.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
#include "file.h"
#include "error-dump.h"
@@ -194,8 +196,21 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
iwl_fw_dbg_get_trigger((fwrt)->fw,\
(trig)))
+static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
+{
+ if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+ } else {
+ iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
+ udelay(100);
+ iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
+ }
+}
+
static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
{
+ iwl_fw_dbg_stop_recording(fwrt);
+
fwrt->dump.conf = FW_DBG_INVALID;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 3e057b539d5b..71cb1ecde0f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -108,6 +108,18 @@ enum iwl_led_mode {
IWL_LED_DISABLE,
};
+/**
+ * enum iwl_nvm_type - nvm formats
+ * @IWL_NVM: the regular format
+ * @IWL_NVM_EXT: extended NVM format
+ * @IWL_NVM_SDP: NVM format used by 3168 series
+ */
+enum iwl_nvm_type {
+ IWL_NVM,
+ IWL_NVM_EXT,
+ IWL_NVM_SDP,
+};
+
/*
* This is the threshold value of plcp error rate per 100mSecs. It is
* used to set and check for the validity of plcp_delta.
@@ -320,7 +332,7 @@ struct iwl_pwr_tx_backoff {
* @integrated: discrete or integrated
* @gen2: a000 and on transport operation
* @cdb: CDB support
- * @ext_nvm: extended NVM format
+ * @nvm_type: see &enum iwl_nvm_type
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -342,6 +354,7 @@ struct iwl_cfg {
const struct iwl_tt_params *thermal_params;
enum iwl_device_family device_family;
enum iwl_led_mode led_mode;
+ enum iwl_nvm_type nvm_type;
u32 max_data_size;
u32 max_inst_size;
netdev_features_t features;
@@ -369,7 +382,6 @@ struct iwl_cfg {
use_tfh:1,
gen2:1,
cdb:1,
- ext_nvm:1,
dbgc_supported:1;
u8 valid_tx_ant;
u8 valid_rx_ant;
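With the ext_nvm bit gone, call sites distinguish the three formats through nvm_type, which is what every cfg->ext_nvm test in the following hunks is converted to. An illustrative sketch of the resulting idiom (not taken from the patch):

switch (cfg->nvm_type) {
case IWL_NVM:		/* regular format; channels come from the SW section */
	break;
case IWL_NVM_SDP:	/* 3168 series; channels come from the dedicated
			 * NVM_SECTION_TYPE_REGULATORY_SDP section
			 */
	break;
case IWL_NVM_EXT:	/* extended format, family 8000 and later */
	break;
}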
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 3014beef4873..c3a5d8ccc95e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -77,7 +77,7 @@
#include "iwl-csr.h"
/* NVM offsets (in words) definitions */
-enum wkp_nvm_offsets {
+enum nvm_offsets {
/* NVM HW-Section offset (in words) definitions */
SUBSYSTEM_ID = 0x0A,
HW_ADDR = 0x15,
@@ -92,7 +92,10 @@ enum wkp_nvm_offsets {
/* NVM calibration section offset (in words) definitions */
NVM_CALIB_SECTION = 0x2B8,
- XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
+ XTAL_CALIB = 0x316 - NVM_CALIB_SECTION,
+
+ /* NVM REGULATORY -Section offset (in words) definitions */
+ NVM_CHANNELS_SDP = 0,
};
enum ext_nvm_offsets {
@@ -206,8 +209,36 @@ enum iwl_nvm_channel_flags {
NVM_CHANNEL_DC_HIGH = BIT(12),
};
+static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
+ int chan, u16 flags)
+{
#define CHECK_AND_PRINT_I(x) \
- ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
+ ((flags & NVM_CHANNEL_##x) ? " " #x : "")
+
+ if (!(flags & NVM_CHANNEL_VALID)) {
+ IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n",
+ chan, flags);
+ return;
+ }
+
+ /* Note: already can print up to 101 characters, 110 is the limit! */
+ IWL_DEBUG_DEV(dev, level,
+ "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ chan, flags,
+ CHECK_AND_PRINT_I(VALID),
+ CHECK_AND_PRINT_I(IBSS),
+ CHECK_AND_PRINT_I(ACTIVE),
+ CHECK_AND_PRINT_I(RADAR),
+ CHECK_AND_PRINT_I(INDOOR_ONLY),
+ CHECK_AND_PRINT_I(GO_CONCURRENT),
+ CHECK_AND_PRINT_I(UNIFORM),
+ CHECK_AND_PRINT_I(20MHZ),
+ CHECK_AND_PRINT_I(40MHZ),
+ CHECK_AND_PRINT_I(80MHZ),
+ CHECK_AND_PRINT_I(160MHZ),
+ CHECK_AND_PRINT_I(DC_HIGH));
+#undef CHECK_AND_PRINT_I
+}
static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
u16 nvm_flags, const struct iwl_cfg *cfg)
@@ -215,7 +246,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
u32 flags = IEEE80211_CHAN_NO_HT40;
u32 last_5ghz_ht = LAST_5GHZ_HT;
- if (cfg->ext_nvm)
+ if (cfg->nvm_type == IWL_NVM_EXT)
last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
@@ -268,7 +299,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, num_2ghz_channels;
const u8 *nvm_chan;
- if (!cfg->ext_nvm) {
+ if (cfg->nvm_type != IWL_NVM_EXT) {
num_of_ch = IWL_NUM_CHANNELS;
nvm_chan = &iwl_nvm_channels[0];
num_2ghz_channels = NUM_2GHZ_CHANNELS;
@@ -302,12 +333,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
* supported, hence we still want to add them to
* the list of supported channels to cfg80211.
*/
- IWL_DEBUG_EEPROM(dev,
- "Ch. %d Flags %x [%sGHz] - No traffic\n",
- nvm_chan[ch_idx],
- ch_flags,
- (ch_idx >= num_2ghz_channels) ?
- "5.2" : "2.4");
+ iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
+ nvm_chan[ch_idx], ch_flags);
continue;
}
@@ -337,27 +364,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
else
channel->flags = 0;
- IWL_DEBUG_EEPROM(dev,
- "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n",
- channel->hw_value,
- is_5ghz ? "5.2" : "2.4",
- ch_flags,
- CHECK_AND_PRINT_I(VALID),
- CHECK_AND_PRINT_I(IBSS),
- CHECK_AND_PRINT_I(ACTIVE),
- CHECK_AND_PRINT_I(RADAR),
- CHECK_AND_PRINT_I(INDOOR_ONLY),
- CHECK_AND_PRINT_I(GO_CONCURRENT),
- CHECK_AND_PRINT_I(UNIFORM),
- CHECK_AND_PRINT_I(20MHZ),
- CHECK_AND_PRINT_I(40MHZ),
- CHECK_AND_PRINT_I(80MHZ),
- CHECK_AND_PRINT_I(160MHZ),
- CHECK_AND_PRINT_I(DC_HIGH),
- channel->max_power,
- ((ch_flags & NVM_CHANNEL_IBSS) &&
- !(ch_flags & NVM_CHANNEL_RADAR))
- ? "" : "not ");
+ iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
+ channel->hw_value, ch_flags);
+ IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
+ channel->hw_value, channel->max_power);
}
return n_channels;
@@ -484,7 +494,7 @@ IWL_EXPORT_SYMBOL(iwl_init_sbands);
static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
const __le16 *phy_sku)
{
- if (!cfg->ext_nvm)
+ if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + SKU);
return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
@@ -492,7 +502,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
{
- if (!cfg->ext_nvm)
+ if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + NVM_VERSION);
else
return le32_to_cpup((__le32 *)(nvm_sw +
@@ -502,7 +512,7 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
const __le16 *phy_sku)
{
- if (!cfg->ext_nvm)
+ if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + RADIO_CFG);
return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
@@ -513,7 +523,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
{
int n_hw_addr;
- if (!cfg->ext_nvm)
+ if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + N_HW_ADDRS);
n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
@@ -525,7 +535,7 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
u32 radio_cfg)
{
- if (!cfg->ext_nvm) {
+ if (cfg->nvm_type != IWL_NVM_EXT) {
data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
@@ -634,7 +644,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
{
if (cfg->mac_addr_from_csr) {
iwl_set_hw_address_from_csr(trans, data);
- } else if (!cfg->ext_nvm) {
+ } else if (cfg->nvm_type != IWL_NVM_EXT) {
const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
/* The byte order is little endian 16 bit, meaning 214365 */
@@ -706,7 +716,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
u16 lar_config;
const __le16 *ch_section;
- if (!cfg->ext_nvm)
+ if (cfg->nvm_type != IWL_NVM_EXT)
data = kzalloc(sizeof(*data) +
sizeof(struct ieee80211_channel) *
IWL_NUM_CHANNELS,
@@ -740,7 +750,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
- if (!cfg->ext_nvm) {
+ if (cfg->nvm_type != IWL_NVM_EXT) {
/* Checking for required sections */
if (!nvm_calib) {
IWL_ERR(trans,
@@ -748,11 +758,15 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
kfree(data);
return NULL;
}
+
+ ch_section = cfg->nvm_type == IWL_NVM_SDP ?
+ &regulatory[NVM_CHANNELS_SDP] :
+ &nvm_sw[NVM_CHANNELS];
+
/* in family 8000 Xtal calibration values moved to OTP */
data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
lar_enabled = true;
- ch_section = &nvm_sw[NVM_CHANNELS];
} else {
u16 lar_offset = data->nvm_version < 0xE39 ?
NVM_LAR_OFFSET_OLD :
@@ -786,7 +800,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
u32 flags = NL80211_RRF_NO_HT40;
u32 last_5ghz_ht = LAST_5GHZ_HT;
- if (cfg->ext_nvm)
+ if (cfg->nvm_type == IWL_NVM_EXT)
last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
if (ch_idx < NUM_2GHZ_CHANNELS &&
@@ -834,7 +848,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int ch_idx;
u16 ch_flags;
u32 reg_rule_flags, prev_reg_rule_flags = 0;
- const u8 *nvm_chan = cfg->ext_nvm ?
+ const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
iwl_ext_nvm_channels : iwl_nvm_channels;
struct ieee80211_regdomain *regd;
int size_of_regd;
@@ -843,7 +857,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int center_freq, prev_center_freq = 0;
int valid_rules = 0;
bool new_rule;
- int max_num_ch = cfg->ext_nvm ?
+ int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS;
if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
@@ -873,12 +887,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
new_rule = false;
if (!(ch_flags & NVM_CHANNEL_VALID)) {
- IWL_DEBUG_DEV(dev, IWL_DL_LAR,
- "Ch. %d Flags %x [%sGHz] - No traffic\n",
- nvm_chan[ch_idx],
- ch_flags,
- (ch_idx >= NUM_2GHZ_CHANNELS) ?
- "5.2" : "2.4");
+ iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
+ nvm_chan[ch_idx], ch_flags);
continue;
}
@@ -914,31 +924,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
prev_center_freq = center_freq;
prev_reg_rule_flags = reg_rule_flags;
- IWL_DEBUG_DEV(dev, IWL_DL_LAR,
- "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s%s%s%s(0x%02x)\n",
- center_freq,
- band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
- CHECK_AND_PRINT_I(VALID),
- CHECK_AND_PRINT_I(IBSS),
- CHECK_AND_PRINT_I(ACTIVE),
- CHECK_AND_PRINT_I(RADAR),
- CHECK_AND_PRINT_I(INDOOR_ONLY),
- CHECK_AND_PRINT_I(GO_CONCURRENT),
- CHECK_AND_PRINT_I(UNIFORM),
- CHECK_AND_PRINT_I(20MHZ),
- CHECK_AND_PRINT_I(40MHZ),
- CHECK_AND_PRINT_I(80MHZ),
- CHECK_AND_PRINT_I(160MHZ),
- CHECK_AND_PRINT_I(DC_HIGH),
- ch_flags);
- IWL_DEBUG_DEV(dev, IWL_DL_LAR,
- "Ch. %d [%sGHz] reg_flags 0x%x: %s\n",
- center_freq,
- band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
- reg_rule_flags,
- ((ch_flags & NVM_CHANNEL_ACTIVE) &&
- !(ch_flags & NVM_CHANNEL_RADAR))
- ? "Ad-Hoc" : "");
+ iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
+ nvm_chan[ch_idx], ch_flags);
}
regd->n_reg_rules = valid_rules;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 5de19ea10575..b205a7bfb828 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -2167,7 +2167,7 @@ out:
* 1. We are not using a unified image
* 2. We are using a unified image but had an error while exiting D3
*/
- set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
/*
* When switching images we return 1, which causes mac80211
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 15f2d826bb4b..a9ac872226fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1077,6 +1077,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
mvm->vif_count = 0;
mvm->rx_ba_sessions = 0;
mvm->fwrt.dump.conf = FW_DBG_INVALID;
+ mvm->monitor_on = false;
/* keep statistics ticking */
iwl_mvm_accu_radio_stats(mvm);
@@ -1437,6 +1438,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvm->p2p_device_vif = vif;
}
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ mvm->monitor_on = true;
+
iwl_mvm_vif_dbgfs_register(mvm, vif);
goto out_unlock;
@@ -1526,6 +1530,9 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
iwl_mvm_power_update_mac(mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ mvm->monitor_on = false;
+
out_release:
mutex_unlock(&mvm->mutex);
}
@@ -1546,6 +1553,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
struct iwl_mvm_mc_iter_data *data = _data;
struct iwl_mvm *mvm = data->mvm;
struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = MCAST_FILTER_CMD,
+ .flags = CMD_ASYNC,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
int ret, len;
/* if we don't have free ports, mcast frames will be dropped */
@@ -1560,7 +1572,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
- ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
+ hcmd.len[0] = len;
+ hcmd.data[0] = cmd;
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (ret)
IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
}
@@ -1635,6 +1650,12 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
if (!cmd)
goto out;
+ if (changed_flags & FIF_ALLMULTI)
+ cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
+
+ if (cmd->pass_all)
+ cmd->count = 0;
+
iwl_mvm_recalc_multicast(mvm);
out:
mutex_unlock(&mvm->mutex);
@@ -2563,7 +2584,7 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
* queues, so we should never get a second deferred
* frame for the RA/TID.
*/
- iwl_mvm_start_mac_queues(mvm, info->hw_queue);
+ iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
ieee80211_free_txskb(mvm->hw, skb);
}
}
@@ -3975,6 +3996,43 @@ out_unlock:
return ret;
}
+static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
+{
+ if (drop) {
+ if (iwl_mvm_has_new_tx_api(mvm))
+ /* TODO new tx api */
+ WARN_ONCE(1,
+ "Need to implement flush TX queue\n");
+ else
+ iwl_mvm_flush_tx_path(mvm,
+ iwl_mvm_flushable_queues(mvm) & queues,
+ 0);
+ } else {
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ struct ieee80211_sta *sta;
+ int i;
+
+ mutex_lock(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ iwl_mvm_wait_sta_queues_empty(mvm,
+ iwl_mvm_sta_from_mac80211(sta));
+ }
+
+ mutex_unlock(&mvm->mutex);
+ } else {
+ iwl_trans_wait_tx_queues_empty(mvm->trans,
+ queues);
+ }
+ }
+}
+
static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u32 queues, bool drop)
{
@@ -3985,7 +4043,12 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
int i;
u32 msk = 0;
- if (!vif || vif->type != NL80211_IFTYPE_STATION)
+ if (!vif) {
+ iwl_mvm_flush_no_vif(mvm, queues, drop);
+ return;
+ }
+
+ if (vif->type != NL80211_IFTYPE_STATION)
return;
/* Make sure we're done with the deferred traffic before flushing */
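One hunk in this file is worth a note: the multicast filter command is now built as a struct iwl_host_cmd with CMD_ASYNC and IWL_HCMD_DFL_NOCOPY instead of going through iwl_mvm_send_cmd_pdu(). With NOCOPY the payload is referenced rather than copied into the command buffer, so it must stay valid until the command completes; that holds here because the buffer is the long-lived mvm->mcast_filter_cmd allocation. The motivation is presumably that a large multicast list does not fit the copying path; that part is an assumption, while the lifetime requirement follows from the flag's semantics. Condensed:

struct iwl_host_cmd hcmd = {
	.id = MCAST_FILTER_CMD,
	.flags = CMD_ASYNC,			/* completion happens later */
	.dataflags[0] = IWL_HCMD_DFL_NOCOPY,	/* payload is not copied */
};

hcmd.len[0] = len;
hcmd.data[0] = cmd;	/* mvm->mcast_filter_cmd, outlives the command */
ret = iwl_mvm_send_cmd(mvm, &hcmd);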
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 83303bac0e4b..949e63418299 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1015,6 +1015,9 @@ struct iwl_mvm {
bool drop_bcn_ap_mode;
struct delayed_work cs_tx_unblock_dwork;
+
+ /* does a monitor vif exist (only one can exist hence bool) */
+ bool monitor_on;
#ifdef CONFIG_ACPI
struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM];
struct iwl_mvm_geo_profile geo_profiles[IWL_NUM_GEO_PROFILES];
@@ -1159,7 +1162,7 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
* Enable LAR only if it is supported by the FW (TLV) &&
* enabled in the NVM
*/
- if (mvm->cfg->ext_nvm)
+ if (mvm->cfg->nvm_type == IWL_NVM_EXT)
return nvm_lar && tlv_lar;
else
return tlv_lar;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 422aa6be9932..fb25b6f29323 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -295,18 +295,24 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
const __be16 *hw;
const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
bool lar_enabled;
+ int regulatory_type;
/* Checking for required sections */
- if (!mvm->trans->cfg->ext_nvm) {
+ if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
return NULL;
}
} else {
+ if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP)
+ regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP;
+ else
+ regulatory_type = NVM_SECTION_TYPE_REGULATORY;
+
/* SW and REGULATORY sections are mandatory */
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
- !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
+ !mvm->nvm_sections[regulatory_type].data) {
IWL_ERR(mvm,
"Can't parse empty family 8000 OTP/NVM sections\n");
return NULL;
@@ -330,11 +336,14 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data;
sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
- regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
mac_override =
(const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
+ regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ?
+ (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
+ (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
+
lar_enabled = !iwlwifi_mod_params.lar_disable &&
fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
@@ -394,7 +403,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
/* Maximal size depends on NVM version */
- if (!mvm->trans->cfg->ext_nvm)
+ if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT)
max_section_size = IWL_MAX_NVM_SECTION_SIZE;
else
max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
@@ -465,7 +474,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
break;
}
- if (!mvm->trans->cfg->ext_nvm) {
+ if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
section_size =
2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
@@ -740,7 +749,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
struct ieee80211_regdomain *regd;
char mcc[3];
- if (mvm->cfg->ext_nvm) {
+ if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
nvm_lar = mvm->nvm_data->lar_enabled;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index ba7bd049d3d4..0fe723ca844e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -661,7 +661,8 @@ static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
(lq_sta->tx_agg_tid_en & BIT(tid)) &&
(tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) {
IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid);
- rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta);
+ if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0)
+ tid_data->state = IWL_AGG_QUEUED;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 184c749766f2..2d14a58cbdd7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -244,7 +244,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
return 0;
default:
- IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
+ /* Expected in monitor (not having the keys) */
+ if (!mvm->monitor_on)
+ IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
}
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 67ffd9774712..248699c2c4bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -277,7 +277,9 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
stats->flag |= RX_FLAG_DECRYPTED;
return 0;
default:
- IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
+ /* Expected in monitor (not having the keys) */
+ if (!mvm->monitor_on)
+ IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
}
return 0;
@@ -672,11 +674,12 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* If there was a significant jump in the nssn - adjust.
* If the SN is smaller than the NSSN it might need to first go into
* the reorder buffer, in which case we just release up to it and the
- * rest of the function will take of storing it and releasing up to the
- * nssn
+ * rest of the function will take care of storing it and releasing up to
+ * the nssn
*/
if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
- buffer->buf_size)) {
+ buffer->buf_size) ||
+ !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 50983615dce6..774122fed454 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -555,7 +555,7 @@ static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_ABORT_CMD,
};
- u32 status;
+ u32 status = CAN_ABORT_STATUS;
ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 411a2055dc45..c4a343534c5e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1285,7 +1285,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
{
struct iwl_mvm_add_sta_cmd cmd;
int ret;
- u32 status;
+ u32 status = ADD_STA_SUCCESS;
lockdep_assert_held(&mvm->mutex);
@@ -2385,8 +2385,10 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
- if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
- IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
+ if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
+ mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
+ IWL_ERR(mvm,
+ "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
mvmsta->tid_data[tid].state);
return -ENXIO;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index d13893806513..aedabe101cf0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -281,6 +281,7 @@ struct iwl_mvm_vif;
* These states relate to a specific RA / TID.
*
* @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_QUEUED: aggregation start work has been queued
* @IWL_AGG_STARTING: aggregation are starting (between start and oper)
* @IWL_AGG_ON: aggregation session is up
* @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
@@ -290,6 +291,7 @@ struct iwl_mvm_vif;
*/
enum iwl_mvm_agg_state {
IWL_AGG_OFF = 0,
+ IWL_AGG_QUEUED,
IWL_AGG_STARTING,
IWL_AGG_ON,
IWL_EMPTYING_HW_QUEUE_ADDBA,
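Read together with the rs.c and sta.c hunks above, the new state guards against queuing the aggregation start work twice for the same RA/TID. A sketch of the intended flow; transitions outside these hunks are summarised, not quoted from the patch:

/*
 *   IWL_AGG_OFF
 *      |  rs_tl_turn_on_agg_for_tid() returned 0, i.e. the
 *      |  ieee80211_start_tx_ba_session() request was queued
 *      v
 *   IWL_AGG_QUEUED
 *      |  iwl_mvm_sta_tx_agg_start() accepts the session
 *      |  (it now allows IWL_AGG_QUEUED as well as IWL_AGG_OFF)
 *      v
 *   IWL_AGG_STARTING  ->  IWL_AGG_ON
 */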
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 8876c2abc440..1232f63278eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -529,6 +529,7 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
lockdep_assert_held(&mvm->mutex);
+ status = 0;
ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
CTDP_CONFIG_CMD),
sizeof(cmd), &cmd, &status);
@@ -630,7 +631,7 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
if (!iwl_mvm_firmware_running(mvm) ||
mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
- ret = -EIO;
+ ret = -ENODATA;
goto out;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 172b5e63d3fb..6f2e2af23219 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -564,8 +564,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
/*
- * Handle legacy hostapd as well, where station will be added
- * only just before sending the association response.
+ * Non-bufferable frames use the broadcast station, thus they
+ * use the probe queue.
* Also take care of the case where we send a deauth to a
* station that we don't have, or similarly an association
* response (with non-success status) for a station we can't
@@ -573,9 +573,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
* Also, disassociate frames might happen, particular with
* reason 7 ("Class 3 frame received from nonassociated STA").
*/
- if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
- ieee80211_is_deauth(fc) || ieee80211_is_assoc_resp(fc) ||
- ieee80211_is_disassoc(fc))
+ if (ieee80211_is_mgmt(fc) &&
+ (!ieee80211_is_bufferable_mmpdu(fc) ||
+ ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
return mvm->probe_queue;
if (info->hw_queue == info->control.vif->cab_queue)
return mvmvif->cab_queue;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 856fa6e8327e..a450bc6bc774 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -115,6 +115,8 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
vif = qtnf_netdev_get_priv(wdev->netdev);
+ qtnf_scan_done(vif->mac, true);
+
if (qtnf_cmd_send_del_intf(vif))
pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
vif->vifid);
@@ -335,6 +337,8 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev)
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
+ qtnf_scan_done(vif->mac, true);
+
ret = qtnf_cmd_send_stop_ap(vif);
if (ret) {
pr_err("VIF%u.%u: failed to stop AP operation in FW\n",
@@ -570,8 +574,6 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
!qtnf_sta_list_lookup(&vif->sta_list, params->mac))
return 0;
- qtnf_scan_done(vif->mac, true);
-
ret = qtnf_cmd_send_del_sta(vif, params);
if (ret)
pr_err("VIF%u.%u: failed to delete STA %pM\n",
@@ -1134,8 +1136,9 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev)
}
vif->sta_state = QTNF_STA_DISCONNECTED;
- qtnf_scan_done(mac, true);
}
+
+ qtnf_scan_done(mac, true);
}
void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
index 6a4af52522b8..66db26613b1f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
@@ -34,6 +34,9 @@ static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted)
.aborted = aborted,
};
+ if (timer_pending(&mac->scan_timeout))
+ del_timer_sync(&mac->scan_timeout);
+
mutex_lock(&mac->mac_lock);
if (mac->scan_req) {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 0fc2814eafad..43d2e7fd6e02 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -345,8 +345,6 @@ qtnf_event_handle_scan_complete(struct qtnf_wmac *mac,
return -EINVAL;
}
- if (timer_pending(&mac->scan_timeout))
- del_timer_sync(&mac->scan_timeout);
qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED);
return 0;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
index 502e72b7cdcc..69131965a298 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
@@ -661,14 +661,18 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
dma_addr_t txbd_paddr, skb_paddr;
struct qtnf_tx_bd *txbd;
+ unsigned long flags;
int len, i;
u32 info;
int ret = 0;
+ spin_lock_irqsave(&priv->tx0_lock, flags);
+
if (!qtnf_tx_queue_ready(priv)) {
if (skb->dev)
netif_stop_queue(skb->dev);
+ spin_unlock_irqrestore(&priv->tx0_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -717,8 +721,10 @@ tx_done:
dev_kfree_skb_any(skb);
}
- qtnf_pcie_data_tx_reclaim(priv);
priv->tx_done_count++;
+ spin_unlock_irqrestore(&priv->tx0_lock, flags);
+
+ qtnf_pcie_data_tx_reclaim(priv);
return NETDEV_TX_OK;
}
@@ -1247,6 +1253,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME);
init_completion(&bus->request_firmware_complete);
mutex_init(&bus->bus_lock);
+ spin_lock_init(&pcie_priv->tx0_lock);
spin_lock_init(&pcie_priv->irq_lock);
spin_lock_init(&pcie_priv->tx_reclaim_lock);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
index e76a23716ee0..86ac1ccedb52 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
@@ -34,6 +34,8 @@ struct qtnf_pcie_bus_priv {
/* lock for tx reclaim operations */
spinlock_t tx_reclaim_lock;
+ /* lock for tx0 operations */
+ spinlock_t tx0_lock;
u8 msi_enabled;
int mps;
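The new tx0_lock serialises TX descriptor ring manipulation between callers, while reclaim keeps using its own tx_reclaim_lock; the unlock happens before qtnf_pcie_data_tx_reclaim() runs, presumably to keep the hot path short and to avoid nesting the two spinlocks. A skeleton of the resulting shape (not the full routine):

static int tx_one(struct qtnf_pcie_bus_priv *priv, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->tx0_lock, flags);

	if (!qtnf_tx_queue_ready(priv)) {
		spin_unlock_irqrestore(&priv->tx0_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* ... map the skb, fill the tx descriptor, ring the doorbell ... */

	priv->tx_done_count++;
	spin_unlock_irqrestore(&priv->tx0_lock, flags);

	/* reclaim takes tx_reclaim_lock, so run it outside tx0_lock */
	qtnf_pcie_data_tx_reclaim(priv);
	return NETDEV_TX_OK;
}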
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 4f73012978e9..1d431d4bf6d2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1122,7 +1122,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
}
if (0 == tmp) {
read_addr = REG_DBI_RDATA + addr % 4;
- ret = rtl_read_byte(rtlpriv, read_addr);
+ ret = rtl_read_word(rtlpriv, read_addr);
}
return ret;
}
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index ee8ed9da00ad..4491ca5aee90 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -486,7 +486,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
- dev->min_mtu = 0;
+ dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
/*
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 523387e71a80..8b8689c6d887 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1316,7 +1316,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netdev->features |= netdev->hw_features;
netdev->ethtool_ops = &xennet_ethtool_ops;
- netdev->min_mtu = 0;
+ netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
SET_NETDEV_DEV(netdev, &dev->dev);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 1427a386a033..3e4d1e7998da 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1417,6 +1417,15 @@ static int btt_claim_class(struct device *dev)
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_namespace_index *nsindex;
+ /*
+ * If any of the DIMMs do not support labels the only
+ * possible BTT format is v1.
+ */
+ if (!ndd) {
+ loop_bitmask = 0;
+ break;
+ }
+
nsindex = to_namespace_index(ndd, ndd->ns_current);
if (nsindex == NULL)
loop_bitmask |= 1;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index acc816b67582..5a14cc7f28ee 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -134,8 +134,6 @@ static inline bool nvme_req_needs_retry(struct request *req)
return false;
if (nvme_req(req)->status & NVME_SC_DNR)
return false;
- if (jiffies - req->start_time >= req->timeout)
- return false;
if (nvme_req(req)->retries >= nvme_max_retries)
return false;
return true;
@@ -2138,7 +2136,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
if (a == &dev_attr_uuid.attr) {
- if (uuid_is_null(&ns->uuid) ||
+ if (uuid_is_null(&ns->uuid) &&
!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
return 0;
}
@@ -2590,7 +2588,7 @@ static void nvme_async_event_work(struct work_struct *work)
container_of(work, struct nvme_ctrl, async_event_work);
spin_lock_irq(&ctrl->lock);
- while (ctrl->event_limit > 0) {
+ while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
int aer_idx = --ctrl->event_limit;
spin_unlock_irq(&ctrl->lock);
@@ -2677,7 +2675,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
/*FALLTHRU*/
case NVME_SC_ABORT_REQ:
++ctrl->event_limit;
- queue_work(nvme_wq, &ctrl->async_event_work);
+ if (ctrl->state == NVME_CTRL_LIVE)
+ queue_work(nvme_wq, &ctrl->async_event_work);
break;
default:
break;
@@ -2692,7 +2691,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
nvme_queue_scan(ctrl);
break;
case NVME_AER_NOTICE_FW_ACT_STARTING:
- schedule_work(&ctrl->fw_act_work);
+ queue_work(nvme_wq, &ctrl->fw_act_work);
break;
default:
dev_warn(ctrl->device, "async event result %08x\n", result);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 47307752dc65..555c976cc2ee 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -565,6 +565,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->queue_size = NVMF_DEF_QUEUE_SIZE;
opts->nr_io_queues = num_online_cpus();
opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
+ opts->kato = NVME_DEFAULT_KATO;
options = o = kstrdup(buf, GFP_KERNEL);
if (!options)
@@ -655,21 +656,22 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
goto out;
}
- if (opts->discovery_nqn) {
- pr_err("Discovery controllers cannot accept keep_alive_tmo != 0\n");
- ret = -EINVAL;
- goto out;
- }
-
if (token < 0) {
pr_err("Invalid keep_alive_tmo %d\n", token);
ret = -EINVAL;
goto out;
- } else if (token == 0) {
+ } else if (token == 0 && !opts->discovery_nqn) {
/* Allowed for debug */
pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
}
opts->kato = token;
+
+ if (opts->discovery_nqn && opts->kato) {
+ pr_err("Discovery controllers cannot accept KATO != 0\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
break;
case NVMF_OPT_CTRL_LOSS_TMO:
if (match_int(args, &token)) {
@@ -762,8 +764,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
uuid_copy(&opts->host->id, &hostid);
out:
- if (!opts->discovery_nqn && !opts->kato)
- opts->kato = NVME_DEFAULT_KATO;
kfree(options);
return ret;
}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index d2e882c0f496..af075e998944 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1376,7 +1376,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
else if (freq->status)
- status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
+ status = cpu_to_le16(NVME_SC_INTERNAL << 1);
/*
* For the linux implementation, if we have an unsuccessful
@@ -1404,7 +1404,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
*/
if (freq->transferred_length !=
be32_to_cpu(op->cmd_iu.data_len)) {
- status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
+ status = cpu_to_le16(NVME_SC_INTERNAL << 1);
goto done;
}
result.u64 = 0;
@@ -1421,7 +1421,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
freq->transferred_length ||
op->rsp_iu.status_code ||
sqe->common.command_id != cqe->command_id)) {
- status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
+ status = cpu_to_le16(NVME_SC_INTERNAL << 1);
goto done;
}
result = cqe->result;
@@ -1429,7 +1429,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
break;
default:
- status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
+ status = cpu_to_le16(NVME_SC_INTERNAL << 1);
goto done;
}
@@ -1989,16 +1989,17 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
* as well as those by FC-NVME spec.
*/
WARN_ON_ONCE(sqe->common.metadata);
- WARN_ON_ONCE(sqe->common.dptr.prp1);
- WARN_ON_ONCE(sqe->common.dptr.prp2);
sqe->common.flags |= NVME_CMD_SGL_METABUF;
/*
- * format SQE DPTR field per FC-NVME rules
- * type=data block descr; subtype=offset;
- * offset is currently 0.
+ * format SQE DPTR field per FC-NVME rules:
+ * type=0x5 Transport SGL Data Block Descriptor
+ * subtype=0xA Transport-specific value
+ * address=0
+ * length=length of the data series
*/
- sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
+ sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
+ NVME_SGL_FMT_TRANSPORT_A;
sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
sqe->rw.dptr.sgl.addr = 0;
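
The DPTR change above packs the SGL descriptor type into the upper nibble of the identifier byte and the transport-specific subtype into the lower nibble. A small standalone illustration of that packing; the 0x5/0xA values follow the comment in the hunk, and the helper is illustrative rather than the driver's API:

    #include <stdint.h>
    #include <stdio.h>

    /* Values taken from the comment in the hunk above. */
    #define SGL_DESC_TYPE_TRANSPORT_DATA_BLOCK  0x5
    #define SGL_DESC_SUBTYPE_TRANSPORT_A        0xA

    /* Descriptor type lives in bits 7:4, subtype in bits 3:0. */
    static uint8_t sgl_identifier(uint8_t type, uint8_t subtype)
    {
        return (uint8_t)((type << 4) | (subtype & 0xf));
    }

    int main(void)
    {
        uint8_t id = sgl_identifier(SGL_DESC_TYPE_TRANSPORT_DATA_BLOCK,
                                    SGL_DESC_SUBTYPE_TRANSPORT_A);

        printf("identifier byte = 0x%02x\n", id);   /* prints 0x5a */
        return 0;
    }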
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4a2121335f48..3f5a04c586ce 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -24,6 +24,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/once.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/t10-pi.h>
@@ -93,7 +94,7 @@ struct nvme_dev {
struct mutex shutdown_lock;
bool subsystem;
void __iomem *cmb;
- dma_addr_t cmb_dma_addr;
+ pci_bus_addr_t cmb_bus_addr;
u64 cmb_size;
u32 cmbsz;
u32 cmbloc;
@@ -540,6 +541,20 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
}
#endif
+static void nvme_print_sgl(struct scatterlist *sgl, int nents)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sgl, sg, nents, i) {
+ dma_addr_t phys = sg_phys(sg);
+ pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
+ "dma_address:%pad dma_length:%d\n",
+ i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
+ sg_dma_len(sg));
+ }
+}
+
static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -622,19 +637,10 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
return BLK_STS_OK;
bad_sgl:
- if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n",
- blk_rq_payload_bytes(req), iod->nents)) {
- for_each_sg(iod->sg, sg, iod->nents, i) {
- dma_addr_t phys = sg_phys(sg);
- pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
- "dma_address:%pad dma_length:%d\n", i, &phys,
- sg->offset, sg->length,
- &sg_dma_address(sg),
- sg_dma_len(sg));
- }
- }
+ WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
+ "Invalid SGL for payload:%d nents:%d\n",
+ blk_rq_payload_bytes(req), iod->nents);
return BLK_STS_IOERR;
-
}
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -1220,7 +1226,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
dev->ctrl.page_size);
- nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
+ nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
nvmeq->sq_cmds_io = dev->cmb + offset;
} else {
nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
@@ -1313,11 +1319,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
if (result < 0)
goto release_cq;
+ nvme_init_queue(nvmeq, qid);
result = queue_request_irq(nvmeq);
if (result < 0)
goto release_sq;
- nvme_init_queue(nvmeq, qid);
return result;
release_sq:
@@ -1464,6 +1470,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
return result;
nvmeq->cq_vector = 0;
+ nvme_init_queue(nvmeq, 0);
result = queue_request_irq(nvmeq);
if (result) {
nvmeq->cq_vector = -1;
@@ -1520,7 +1527,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
resource_size_t bar_size;
struct pci_dev *pdev = to_pci_dev(dev->dev);
void __iomem *cmb;
- dma_addr_t dma_addr;
+ int bar;
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!(NVME_CMB_SZ(dev->cmbsz)))
@@ -1533,7 +1540,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
size = szu * NVME_CMB_SZ(dev->cmbsz);
offset = szu * NVME_CMB_OFST(dev->cmbloc);
- bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
+ bar = NVME_CMB_BIR(dev->cmbloc);
+ bar_size = pci_resource_len(pdev, bar);
if (offset > bar_size)
return NULL;
@@ -1546,12 +1554,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
- cmb = ioremap_wc(dma_addr, size);
+ cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
if (!cmb)
return NULL;
- dev->cmb_dma_addr = dma_addr;
+ dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
dev->cmb_size = size;
return cmb;
}
@@ -2156,7 +2163,6 @@ static void nvme_reset_work(struct work_struct *work)
if (result)
goto out;
- nvme_init_queue(dev->queues[0], 0);
result = nvme_alloc_admin_tags(dev);
if (result)
goto out;
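
The bad_sgl path above wraps the scatterlist dump in WARN(DO_ONCE(...), ...), so both the expensive per-entry dump and the warning are emitted only the first time an invalid SGL is seen. A rough user-space approximation of that pattern, using a plain static flag rather than the kernel's once/jump-label machinery (so it is not thread-safe like the original):

    #include <stdbool.h>
    #include <stdio.h>

    static void report_bad_sgl(int payload, int nents)
    {
        static bool warned;

        if (warned)     /* later occurrences stay silent */
            return;
        warned = true;

        fprintf(stderr, "invalid SGL for payload:%d nents:%d\n",
                payload, nents);
        for (int i = 0; i < nents; i++)
            fprintf(stderr, "  entry[%d]: ...\n", i);
    }

    int main(void)
    {
        report_bad_sgl(4096, 2);
        report_bad_sgl(8192, 3);    /* no second dump or warning */
        return 0;
    }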
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 58983000964b..92a03ff5fb4d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -942,7 +942,12 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
}
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- WARN_ON_ONCE(!changed);
+ if (!changed) {
+ /* state change failure is ok if we're in DELETING state */
+ WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
+ return;
+ }
+
ctrl->ctrl.nr_reconnects = 0;
nvme_start_ctrl(&ctrl->ctrl);
@@ -962,7 +967,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
struct nvme_rdma_ctrl *ctrl = container_of(work,
struct nvme_rdma_ctrl, err_work);
- nvme_stop_ctrl(&ctrl->ctrl);
+ nvme_stop_keep_alive(&ctrl->ctrl);
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 7c23eaf8e563..1b208beeef50 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -390,10 +390,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
if (status)
nvmet_set_status(req, status);
- /* XXX: need to fill in something useful for sq_head */
- req->rsp->sq_head = 0;
- if (likely(req->sq)) /* may happen during early failure */
- req->rsp->sq_id = cpu_to_le16(req->sq->qid);
+ if (req->sq->size)
+ req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size;
+ req->rsp->sq_head = cpu_to_le16(req->sq->sqhd);
+ req->rsp->sq_id = cpu_to_le16(req->sq->qid);
req->rsp->command_id = req->cmd->common.command_id;
if (req->ns)
@@ -420,6 +420,7 @@ void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
u16 qid, u16 size)
{
+ sq->sqhd = 0;
sq->qid = qid;
sq->size = size;
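
The target change above maintains a real submission-queue head by advancing it modulo the queue size on every completion instead of always reporting 0. A minimal sketch of that wrap-around, which also shows why a zero queue size must never reach this point:

    #include <stdint.h>
    #include <stdio.h>

    struct sq {
        uint16_t sqhd;  /* current head, reported back in completions */
        uint16_t size;  /* number of entries; must be non-zero */
    };

    /* Advance the head by one entry, wrapping at the end of the queue. */
    static uint16_t sq_advance_head(struct sq *sq)
    {
        if (sq->size)   /* mirrors the "if (req->sq->size)" guard */
            sq->sqhd = (uint16_t)((sq->sqhd + 1) % sq->size);
        return sq->sqhd;
    }

    int main(void)
    {
        struct sq sq = { .sqhd = 0, .size = 4 };

        for (int i = 0; i < 6; i++)
            printf("sq_head = %u\n", sq_advance_head(&sq));
        /* prints 1 2 3 0 1 2 */
        return 0;
    }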
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 859a66725291..db3bf6b8bf9e 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -109,9 +109,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
pr_warn("queue already connected!\n");
return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
}
+ if (!sqsize) {
+ pr_warn("queue size zero!\n");
+ return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ }
- nvmet_cq_setup(ctrl, req->cq, qid, sqsize);
- nvmet_sq_setup(ctrl, req->sq, qid, sqsize);
+ /* note: convert queue size from 0's-based value to 1's-based value */
+ nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
+ nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
return 0;
}
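
The Connect handler above treats the SQSIZE field as a 0's-based value: a wire value of N means N + 1 entries, and a raw 0 is rejected outright because it cannot describe a usable queue. A small sketch of that validation and conversion; the upper bound is illustrative, not the target's real limit:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_QUEUE_ENTRIES   128     /* illustrative upper bound */

    /*
     * Convert a 0's-based SQSIZE from the Connect command into a
     * 1's-based entry count. Returns 0 on an invalid value.
     */
    static uint16_t connect_sqsize_to_entries(uint16_t sqsize)
    {
        if (sqsize == 0)                /* the "queue size zero!" case */
            return 0;
        if (sqsize + 1 > MAX_QUEUE_ENTRIES)
            return 0;
        return (uint16_t)(sqsize + 1);
    }

    int main(void)
    {
        printf("%u\n", connect_sqsize_to_entries(31));  /* 32 entries */
        printf("%u\n", connect_sqsize_to_entries(0));   /* rejected */
        return 0;
    }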
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 421e43bf1dd7..58e010bdda3e 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -148,7 +148,7 @@ struct nvmet_fc_tgt_assoc {
u32 a_id;
struct nvmet_fc_tgtport *tgtport;
struct list_head a_list;
- struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES];
+ struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
};
@@ -608,7 +608,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
unsigned long flags;
int ret;
- if (qid >= NVMET_NR_QUEUES)
+ if (qid > NVMET_NR_QUEUES)
return NULL;
queue = kzalloc((sizeof(*queue) +
@@ -783,6 +783,9 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
u16 qid = nvmet_fc_getqueueid(connection_id);
unsigned long flags;
+ if (qid > NVMET_NR_QUEUES)
+ return NULL;
+
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
@@ -888,7 +891,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
int i;
spin_lock_irqsave(&tgtport->lock, flags);
- for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
+ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
queue = assoc->queues[i];
if (queue) {
if (!nvmet_fc_tgt_q_get(queue))
@@ -1910,8 +1913,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
spin_lock_irqsave(&fod->flock, flags);
fod->writedataactive = false;
spin_unlock_irqrestore(&fod->flock, flags);
- nvmet_req_complete(&fod->req,
- NVME_SC_FC_TRANSPORT_ERROR);
+ nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
fcpreq->fcp_error = ret;
fcpreq->transferred_length = 0;
@@ -1929,8 +1931,7 @@ __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
/* if in the middle of an io and we need to tear down */
if (abort) {
if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
- nvmet_req_complete(&fod->req,
- NVME_SC_FC_TRANSPORT_ERROR);
+ nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
return true;
}
@@ -1968,8 +1969,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
fod->abort = true;
spin_unlock(&fod->flock);
- nvmet_req_complete(&fod->req,
- NVME_SC_FC_TRANSPORT_ERROR);
+ nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
return;
}
@@ -2533,13 +2533,17 @@ nvmet_fc_remove_port(struct nvmet_port *port)
{
struct nvmet_fc_tgtport *tgtport = port->priv;
unsigned long flags;
+ bool matched = false;
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
if (tgtport->port == port) {
- nvmet_fc_tgtport_put(tgtport);
+ matched = true;
tgtport->port = NULL;
}
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ if (matched)
+ nvmet_fc_tgtport_put(tgtport);
}
static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
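
The association fix above sizes the per-association queue array as NVMET_NR_QUEUES + 1 because qid 0 is the admin queue and qids 1..NVMET_NR_QUEUES are I/O queues; the lookup and teardown paths are adjusted to match. A compact sketch of that indexing and the matching bounds check, with a made-up queue count:

    #include <stdio.h>
    #include <string.h>

    #define NR_IO_QUEUES    4   /* stands in for NVMET_NR_QUEUES */

    struct assoc {
        /* slot 0 is the admin queue, slots 1..NR_IO_QUEUES are I/O queues */
        void *queues[NR_IO_QUEUES + 1];
    };

    static void *assoc_find_queue(struct assoc *a, unsigned int qid)
    {
        if (qid > NR_IO_QUEUES)     /* qid == NR_IO_QUEUES is still valid */
            return NULL;
        return a->queues[qid];
    }

    int main(void)
    {
        struct assoc a;
        int marker = 1;

        memset(&a, 0, sizeof(a));
        a.queues[NR_IO_QUEUES] = &marker;

        printf("qid %d: %s\n", NR_IO_QUEUES,
               assoc_find_queue(&a, NR_IO_QUEUES) ? "found" : "missing");
        printf("qid %d: %s\n", NR_IO_QUEUES + 1,
               assoc_find_queue(&a, NR_IO_QUEUES + 1) ? "found" : "missing");
        return 0;
    }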
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 1cb9847ec261..7b75d9de55ab 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -224,8 +224,6 @@ struct fcloop_nport {
struct fcloop_lport *lport;
struct list_head nport_list;
struct kref ref;
- struct completion rport_unreg_done;
- struct completion tport_unreg_done;
u64 node_name;
u64 port_name;
u32 port_role;
@@ -576,7 +574,7 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
tfcp_req->aborted = true;
spin_unlock(&tfcp_req->reqlock);
- tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED;
+ tfcp_req->status = NVME_SC_INTERNAL;
/*
* nothing more to do. If io wasn't active, the transport should
@@ -631,6 +629,32 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
}
static void
+fcloop_nport_free(struct kref *ref)
+{
+ struct fcloop_nport *nport =
+ container_of(ref, struct fcloop_nport, ref);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_del(&nport->nport_list);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ kfree(nport);
+}
+
+static void
+fcloop_nport_put(struct fcloop_nport *nport)
+{
+ kref_put(&nport->ref, fcloop_nport_free);
+}
+
+static int
+fcloop_nport_get(struct fcloop_nport *nport)
+{
+ return kref_get_unless_zero(&nport->ref);
+}
+
+static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
struct fcloop_lport *lport = localport->private;
@@ -644,8 +668,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
struct fcloop_rport *rport = remoteport->private;
- /* release any threads waiting for the unreg to complete */
- complete(&rport->nport->rport_unreg_done);
+ fcloop_nport_put(rport->nport);
}
static void
@@ -653,8 +676,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
struct fcloop_tport *tport = targetport->private;
- /* release any threads waiting for the unreg to complete */
- complete(&tport->nport->tport_unreg_done);
+ fcloop_nport_put(tport->nport);
}
#define FCLOOP_HW_QUEUES 4
@@ -722,6 +744,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
goto out_free_opts;
}
+ memset(&pinfo, 0, sizeof(pinfo));
pinfo.node_name = opts->wwnn;
pinfo.port_name = opts->wwpn;
pinfo.port_role = opts->roles;
@@ -804,32 +827,6 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
return ret ? ret : count;
}
-static void
-fcloop_nport_free(struct kref *ref)
-{
- struct fcloop_nport *nport =
- container_of(ref, struct fcloop_nport, ref);
- unsigned long flags;
-
- spin_lock_irqsave(&fcloop_lock, flags);
- list_del(&nport->nport_list);
- spin_unlock_irqrestore(&fcloop_lock, flags);
-
- kfree(nport);
-}
-
-static void
-fcloop_nport_put(struct fcloop_nport *nport)
-{
- kref_put(&nport->ref, fcloop_nport_free);
-}
-
-static int
-fcloop_nport_get(struct fcloop_nport *nport)
-{
- return kref_get_unless_zero(&nport->ref);
-}
-
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
@@ -938,6 +935,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
if (!nport)
return -EIO;
+ memset(&pinfo, 0, sizeof(pinfo));
pinfo.node_name = nport->node_name;
pinfo.port_name = nport->port_name;
pinfo.port_role = nport->port_role;
@@ -979,24 +977,12 @@ __unlink_remote_port(struct fcloop_nport *nport)
}
static int
-__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
+__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
- int ret;
-
if (!rport)
return -EALREADY;
- init_completion(&nport->rport_unreg_done);
-
- ret = nvme_fc_unregister_remoteport(rport->remoteport);
- if (ret)
- return ret;
-
- wait_for_completion(&nport->rport_unreg_done);
-
- fcloop_nport_put(nport);
-
- return ret;
+ return nvme_fc_unregister_remoteport(rport->remoteport);
}
static ssize_t
@@ -1029,7 +1015,7 @@ fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
if (!nport)
return -ENOENT;
- ret = __wait_remoteport_unreg(nport, rport);
+ ret = __remoteport_unreg(nport, rport);
return ret ? ret : count;
}
@@ -1086,24 +1072,12 @@ __unlink_target_port(struct fcloop_nport *nport)
}
static int
-__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
+__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
- int ret;
-
if (!tport)
return -EALREADY;
- init_completion(&nport->tport_unreg_done);
-
- ret = nvmet_fc_unregister_targetport(tport->targetport);
- if (ret)
- return ret;
-
- wait_for_completion(&nport->tport_unreg_done);
-
- fcloop_nport_put(nport);
-
- return ret;
+ return nvmet_fc_unregister_targetport(tport->targetport);
}
static ssize_t
@@ -1136,7 +1110,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
if (!nport)
return -ENOENT;
- ret = __wait_targetport_unreg(nport, tport);
+ ret = __targetport_unreg(nport, tport);
return ret ? ret : count;
}
@@ -1223,11 +1197,11 @@ static void __exit fcloop_exit(void)
spin_unlock_irqrestore(&fcloop_lock, flags);
- ret = __wait_targetport_unreg(nport, tport);
+ ret = __targetport_unreg(nport, tport);
if (ret)
pr_warn("%s: Failed deleting target port\n", __func__);
- ret = __wait_remoteport_unreg(nport, rport);
+ ret = __remoteport_unreg(nport, rport);
if (ret)
pr_warn("%s: Failed deleting remote port\n", __func__);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7d261ab894f4..7b8e20adf760 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -74,6 +74,7 @@ struct nvmet_sq {
struct percpu_ref ref;
u16 qid;
u16 size;
+ u16 sqhd;
struct completion free_done;
struct completion confirm_done;
};
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index de54c7f5048a..d12e5de78e70 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -135,7 +135,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
/* Stop the user from writing */
if (pos >= nvmem->size)
- return 0;
+ return -EFBIG;
if (count < nvmem->word_size)
return -EINVAL;
@@ -789,6 +789,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
return ERR_PTR(-EINVAL);
nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
+ of_node_put(nvmem_np);
if (IS_ERR(nvmem))
return ERR_CAST(nvmem);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 260d33c0f26c..63897531cd75 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1781,8 +1781,12 @@ bool of_console_check(struct device_node *dn, char *name, int index)
{
if (!dn || dn != of_stdout || console_set_on_cmdline)
return false;
- return !add_preferred_console(name, index,
- kstrdup(of_stdout_options, GFP_KERNEL));
+
+ /*
+ * XXX: cast `options' to char pointer to suppress compilation
+ * warnings: printk, UART and console drivers expect char pointer.
+ */
+ return !add_preferred_console(name, index, (char *)of_stdout_options);
}
EXPORT_SYMBOL_GPL(of_console_check);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index d94dd8b77abd..98258583abb0 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -44,7 +44,7 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id)
return -EINVAL;
}
-static void of_mdiobus_register_phy(struct mii_bus *mdio,
+static int of_mdiobus_register_phy(struct mii_bus *mdio,
struct device_node *child, u32 addr)
{
struct phy_device *phy;
@@ -60,9 +60,13 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
else
phy = get_phy_device(mdio, addr, is_c45);
if (IS_ERR(phy))
- return;
+ return PTR_ERR(phy);
- rc = irq_of_parse_and_map(child, 0);
+ rc = of_irq_get(child, 0);
+ if (rc == -EPROBE_DEFER) {
+ phy_device_free(phy);
+ return rc;
+ }
if (rc > 0) {
phy->irq = rc;
mdio->irq[addr] = rc;
@@ -84,22 +88,23 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
if (rc) {
phy_device_free(phy);
of_node_put(child);
- return;
+ return rc;
}
dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
child->name, addr);
+ return 0;
}
-static void of_mdiobus_register_device(struct mii_bus *mdio,
- struct device_node *child, u32 addr)
+static int of_mdiobus_register_device(struct mii_bus *mdio,
+ struct device_node *child, u32 addr)
{
struct mdio_device *mdiodev;
int rc;
mdiodev = mdio_device_create(mdio, addr);
if (IS_ERR(mdiodev))
- return;
+ return PTR_ERR(mdiodev);
/* Associate the OF node with the device structure so it
* can be looked up later.
@@ -112,11 +117,12 @@ static void of_mdiobus_register_device(struct mii_bus *mdio,
if (rc) {
mdio_device_free(mdiodev);
of_node_put(child);
- return;
+ return rc;
}
dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
child->name, addr);
+ return 0;
}
/* The following is a list of PHY compatible strings which appear in
@@ -219,9 +225,11 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
}
if (of_mdiobus_child_is_phy(child))
- of_mdiobus_register_phy(mdio, child, addr);
+ rc = of_mdiobus_register_phy(mdio, child, addr);
else
- of_mdiobus_register_device(mdio, child, addr);
+ rc = of_mdiobus_register_device(mdio, child, addr);
+ if (rc)
+ goto unregister;
}
if (!scanphys)
@@ -242,12 +250,19 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
dev_info(&mdio->dev, "scan phy %s at address %i\n",
child->name, addr);
- if (of_mdiobus_child_is_phy(child))
- of_mdiobus_register_phy(mdio, child, addr);
+ if (of_mdiobus_child_is_phy(child)) {
+ rc = of_mdiobus_register_phy(mdio, child, addr);
+ if (rc)
+ goto unregister;
+ }
}
}
return 0;
+
+unregister:
+ mdiobus_unregister(mdio);
+ return rc;
}
EXPORT_SYMBOL(of_mdiobus_register);
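
of_mdiobus_register() now propagates failures from the child registration helpers and unwinds by unregistering the whole bus instead of silently skipping the failed child (with -EPROBE_DEFER surfaced so the probe can be retried). A simplified sketch of that register-children-or-roll-back shape, with invented bus/child types:

    #include <stdio.h>

    #define NCHILDREN   3

    /* Invented stand-ins for the bus and its children. */
    struct bus { int registered; };

    static int register_child(struct bus *b, int idx)
    {
        (void)b;
        if (idx == 2)       /* pretend the third child fails */
            return -1;
        printf("registered child %d\n", idx);
        return 0;
    }

    static void bus_unregister(struct bus *b)
    {
        printf("unregistering bus\n");
        b->registered = 0;
    }

    static int bus_register_children(struct bus *b)
    {
        int rc;

        b->registered = 1;
        for (int i = 0; i < NCHILDREN; i++) {
            rc = register_child(b, i);
            if (rc)
                goto unregister;    /* propagate, don't ignore */
        }
        return 0;

    unregister:
        bus_unregister(b);
        return rc;
    }

    int main(void)
    {
        struct bus b;

        return bus_register_children(&b) ? 1 : 0;
    }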
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index d507c3569a88..32771c2ced7b 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -25,7 +25,7 @@
#include <linux/sort.h>
#include <linux/slab.h>
-#define MAX_RESERVED_REGIONS 16
+#define MAX_RESERVED_REGIONS 32
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
diff --git a/drivers/of/property.c b/drivers/of/property.c
index fbb72116e9d4..264c355ba1ff 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -954,7 +954,7 @@ of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode)
struct device_node *np;
/* Get the parent of the port */
- np = of_get_next_parent(to_of_node(fwnode));
+ np = of_get_parent(to_of_node(fwnode));
if (!np)
return NULL;
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 4ddc6e8f9fe7..f9308c2f22e6 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -251,9 +251,8 @@ err:
return ret;
}
-static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test)
+static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
{
- u8 irq;
u8 msi_count;
struct pci_epf *epf = epf_test->epf;
struct pci_epc *epc = epf->epc;
@@ -262,7 +261,6 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test)
reg->status |= STATUS_IRQ_RAISED;
msi_count = pci_epc_get_msi(epc);
- irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
if (irq > msi_count || msi_count <= 0)
pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
else
@@ -289,6 +287,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->command = 0;
reg->status = 0;
+ irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
+
if (command & COMMAND_RAISE_LEGACY_IRQ) {
reg->status = STATUS_IRQ_RAISED;
pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
@@ -301,7 +301,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_WRITE_FAIL;
else
reg->status |= STATUS_WRITE_SUCCESS;
- pci_epf_test_raise_irq(epf_test);
+ pci_epf_test_raise_irq(epf_test, irq);
goto reset_handler;
}
@@ -311,7 +311,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_READ_SUCCESS;
else
reg->status |= STATUS_READ_FAIL;
- pci_epf_test_raise_irq(epf_test);
+ pci_epf_test_raise_irq(epf_test, irq);
goto reset_handler;
}
@@ -321,13 +321,12 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_COPY_SUCCESS;
else
reg->status |= STATUS_COPY_FAIL;
- pci_epf_test_raise_irq(epf_test);
+ pci_epf_test_raise_irq(epf_test, irq);
goto reset_handler;
}
if (command & COMMAND_RAISE_MSI_IRQ) {
msi_count = pci_epc_get_msi(epc);
- irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
if (irq > msi_count || msi_count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
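
The endpoint-test change above reads the MSI number out of the command register once, before reg->command and reg->status are cleared, and passes it down to pci_epf_test_raise_irq(); re-reading the register later could observe a value the host has already overwritten. A tiny sketch of latching a field from shared state before resetting it; the field layout here is invented:

    #include <stdint.h>
    #include <stdio.h>

    #define MSI_NUMBER_MASK     0x1f00  /* invented layout */
    #define MSI_NUMBER_SHIFT    8

    struct test_reg {
        uint32_t command;
        uint32_t status;
    };

    static void raise_irq(uint8_t irq)
    {
        printf("raising MSI %u\n", irq);
    }

    static void handle_command(struct test_reg *reg)
    {
        uint32_t command = reg->command;
        uint8_t irq;

        /* Latch everything needed before clearing the shared register. */
        irq = (uint8_t)((command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT);
        reg->command = 0;
        reg->status = 0;

        /* ... perform the requested read/write/copy ... */
        raise_irq(irq);
    }

    int main(void)
    {
        struct test_reg reg = { .command = 3 << MSI_NUMBER_SHIFT, .status = 0 };

        handle_command(&reg);
        return 0;
    }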
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 89f4e3d072d7..26ed0c08f209 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -935,6 +935,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
bridge->sysdata = pcie;
bridge->busnr = 0;
bridge->ops = &advk_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
ret = pci_scan_root_bus_bridge(bridge);
if (ret < 0) {
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 9c40da54f88a..1987fec1f126 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -233,6 +233,7 @@ struct tegra_msi {
struct msi_controller chip;
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
+ unsigned long pages;
struct mutex lock;
u64 phys;
int irq;
@@ -1529,22 +1530,9 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
goto err;
}
- /*
- * The PCI host bridge on Tegra contains some logic that intercepts
- * MSI writes, which means that the MSI target address doesn't have
- * to point to actual physical memory. Rather than allocating one 4
- * KiB page of system memory that's never used, we can simply pick
- * an arbitrary address within an area reserved for system memory
- * in the FPCI address map.
- *
- * However, in order to avoid confusion, we pick an address that
- * doesn't map to physical memory. The FPCI address map reserves a
- * 1012 GiB region for system memory and memory-mapped I/O. Since
- * none of the Tegra SoCs that contain this PCI host bridge can
- * address more than 16 GiB of system memory, the last 4 KiB of
- * these 1012 GiB is a good candidate.
- */
- msi->phys = 0xfcfffff000;
+ /* setup AFI/FPCI range */
+ msi->pages = __get_free_pages(GFP_KERNEL, 0);
+ msi->phys = virt_to_phys((void *)msi->pages);
afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
@@ -1596,6 +1584,8 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
+ free_pages(msi->pages, 0);
+
if (msi->irq > 0)
free_irq(msi->irq, pcie);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 1eecfa301f7f..8e075ea2743e 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -686,7 +686,7 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
- char *driver_override, *old = pdev->driver_override, *cp;
+ char *driver_override, *old, *cp;
/* We need to keep extra room for a newline */
if (count >= (PAGE_SIZE - 1))
@@ -700,12 +700,15 @@ static ssize_t driver_override_store(struct device *dev,
if (cp)
*cp = '\0';
+ device_lock(dev);
+ old = pdev->driver_override;
if (strlen(driver_override)) {
pdev->driver_override = driver_override;
} else {
kfree(driver_override);
pdev->driver_override = NULL;
}
+ device_unlock(dev);
kfree(old);
@@ -716,8 +719,12 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t len;
- return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+ device_unlock(dev);
+ return len;
}
static DEVICE_ATTR_RW(driver_override);
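
The sysfs fix above takes the device lock around both the store and show paths so the old driver_override string cannot be freed while a concurrent reader is still formatting it. A user-space sketch of the same discipline, with a pthread mutex guarding the pointer swap and the read; names are illustrative:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static char *driver_override;   /* protected by lock */

    /* Store: swap the pointer under the lock, free the old value outside. */
    static void override_store(const char *new_val)
    {
        char *new_copy = (new_val && *new_val) ? strdup(new_val) : NULL;
        char *old;

        pthread_mutex_lock(&lock);
        old = driver_override;
        driver_override = new_copy;
        pthread_mutex_unlock(&lock);

        free(old);
    }

    /* Show: format the current value while holding the lock. */
    static void override_show(char *buf, size_t len)
    {
        pthread_mutex_lock(&lock);
        snprintf(buf, len, "%s\n", driver_override ? driver_override : "");
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        char buf[64];

        override_store("vfio-pci");
        override_show(buf, sizeof(buf));
        fputs(buf, stdout);
        override_store(NULL);
        return 0;
    }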
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 0a9b78705ee8..3303dd8d8eb5 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -235,6 +235,7 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
ret = armpmu_register(pmu);
if (ret) {
pr_warn("Failed to register PMU for CPU%d\n", cpu);
+ kfree(pmu->name);
return ret;
}
}
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index 73ebad6634a7..89c887ea5557 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -111,6 +111,8 @@
#define MVEBU_COMPHY_CONF6_40B BIT(18)
#define MVEBU_COMPHY_SELECTOR 0x1140
#define MVEBU_COMPHY_SELECTOR_PHY(n) ((n) * 0x4)
+#define MVEBU_COMPHY_PIPE_SELECTOR 0x1144
+#define MVEBU_COMPHY_PIPE_SELECTOR_PIPE(n) ((n) * 0x4)
#define MVEBU_COMPHY_LANES 6
#define MVEBU_COMPHY_PORTS 3
@@ -468,13 +470,17 @@ static int mvebu_comphy_power_on(struct phy *phy)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
- int ret;
- u32 mux, val;
+ int ret, mux;
+ u32 val;
mux = mvebu_comphy_get_mux(lane->id, lane->port, lane->mode);
if (mux < 0)
return -ENOTSUPP;
+ regmap_read(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, &val);
+ val &= ~(0xf << MVEBU_COMPHY_PIPE_SELECTOR_PIPE(lane->id));
+ regmap_write(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, val);
+
regmap_read(priv->regmap, MVEBU_COMPHY_SELECTOR, &val);
val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id));
val |= mux << MVEBU_COMPHY_SELECTOR_PHY(lane->id);
@@ -526,6 +532,10 @@ static int mvebu_comphy_power_off(struct phy *phy)
val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id));
regmap_write(priv->regmap, MVEBU_COMPHY_SELECTOR, val);
+ regmap_read(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, &val);
+ val &= ~(0xf << MVEBU_COMPHY_PIPE_SELECTOR_PIPE(lane->id));
+ regmap_write(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, val);
+
return 0;
}
@@ -576,8 +586,8 @@ static int mvebu_comphy_probe(struct platform_device *pdev)
return PTR_ERR(priv->regmap);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
- if (!priv->base)
- return -ENOMEM;
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
for_each_available_child_of_node(pdev->dev.of_node, child) {
struct mvebu_comphy_lane *lane;
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index e3baad78521f..721a2a1c97ef 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -27,6 +27,7 @@
/* banks shared by multiple phys */
#define SSUSB_SIFSLV_V1_SPLLC 0x000 /* shared by u3 phys */
#define SSUSB_SIFSLV_V1_U2FREQ 0x100 /* shared by u2 phys */
+#define SSUSB_SIFSLV_V1_CHIP 0x300 /* shared by u3 phys */
/* u2 phy bank */
#define SSUSB_SIFSLV_V1_U2PHY_COM 0x000
/* u3/pcie/sata phy banks */
@@ -762,7 +763,7 @@ static void phy_v1_banks_init(struct mtk_tphy *tphy,
case PHY_TYPE_USB3:
case PHY_TYPE_PCIE:
u3_banks->spllc = tphy->sif_base + SSUSB_SIFSLV_V1_SPLLC;
- u3_banks->chip = NULL;
+ u3_banks->chip = tphy->sif_base + SSUSB_SIFSLV_V1_CHIP;
u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V1_U3PHYD;
u3_banks->phya = instance->port_base + SSUSB_SIFSLV_V1_U3PHYA;
break;
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 4d2c57f21d76..a958c9bced01 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -443,14 +443,34 @@ static inline int property_enable(struct rockchip_typec_phy *tcphy,
return regmap_write(tcphy->grf_regs, reg->offset, val | mask);
}
+static void tcphy_dp_aux_set_flip(struct rockchip_typec_phy *tcphy)
+{
+ u16 tx_ana_ctrl_reg_1;
+
+ /*
+ * Select the polarity of the xcvr:
+ * 1, Reverses the polarity (If TYPEC, Pulls ups aux_p and pull
+ * down aux_m)
+ * 0, Normal polarity (if TYPEC, pulls up aux_m and pulls down
+ * aux_p)
+ */
+ tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
+ if (!tcphy->flip)
+ tx_ana_ctrl_reg_1 |= BIT(12);
+ else
+ tx_ana_ctrl_reg_1 &= ~BIT(12);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+}
+
static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
{
+ u16 tx_ana_ctrl_reg_1;
u16 rdata, rdata2, val;
/* disable txda_cal_latch_en for rewrite the calibration values */
- rdata = readl(tcphy->base + TX_ANA_CTRL_REG_1);
- val = rdata & 0xdfff;
- writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 &= ~BIT(13);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
/*
* read a resistor calibration code from CMN_TXPUCAL_CTRL[6:0] and
@@ -472,9 +492,8 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
* Activate this signal for 1 clock cycle to sample new calibration
* values.
*/
- rdata = readl(tcphy->base + TX_ANA_CTRL_REG_1);
- val = rdata | 0x2000;
- writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(13);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
usleep_range(150, 200);
/* set TX Voltage Level and TX Deemphasis to 0 */
@@ -482,8 +501,10 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
/* re-enable decap */
writel(0x100, tcphy->base + TX_ANA_CTRL_REG_2);
writel(0x300, tcphy->base + TX_ANA_CTRL_REG_2);
- writel(0x2008, tcphy->base + TX_ANA_CTRL_REG_1);
- writel(0x2018, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(3);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(4);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
@@ -494,8 +515,10 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
writel(0x1001, tcphy->base + TX_ANA_CTRL_REG_4);
/* re-enables Bandgap reference for LDO */
- writel(0x2098, tcphy->base + TX_ANA_CTRL_REG_1);
- writel(0x2198, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(7);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(8);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
/*
* re-enables the transmitter pre-driver, driver data selection MUX,
@@ -505,27 +528,26 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
writel(0x303, tcphy->base + TX_ANA_CTRL_REG_2);
/*
- * BIT 12: Controls auxda_polarity, which selects the polarity of the
- * xcvr:
- * 1, Reverses the polarity (If TYPEC, Pulls ups aux_p and pull
- * down aux_m)
- * 0, Normal polarity (if TYPE_C, pulls up aux_m and pulls down
- * aux_p)
+ * Do some magic undocumented stuff, some of which appears to
+ * undo the "re-enables Bandgap reference for LDO" above.
*/
- val = 0xa078;
- if (!tcphy->flip)
- val |= BIT(12);
- writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(15);
+ tx_ana_ctrl_reg_1 &= ~BIT(8);
+ tx_ana_ctrl_reg_1 &= ~BIT(7);
+ tx_ana_ctrl_reg_1 |= BIT(6);
+ tx_ana_ctrl_reg_1 |= BIT(5);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
writel(0, tcphy->base + TX_ANA_CTRL_REG_3);
writel(0, tcphy->base + TX_ANA_CTRL_REG_4);
writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
/*
- * Controls low_power_swing_en, set the voltage swing of the driver
- * to 400mv. The values below are peak to peak (differential) values.
+ * Controls low_power_swing_en, don't set the voltage swing of the
+ * driver to 400mv. The values below are peak to peak (differential)
+ * values.
*/
- writel(4, tcphy->base + TXDA_COEFF_CALC_CTRL);
+ writel(0, tcphy->base + TXDA_COEFF_CALC_CTRL);
writel(0, tcphy->base + TXDA_CYA_AUXDA_CYA);
/* Controls tx_high_z_tm_en */
@@ -555,6 +577,7 @@ static int tcphy_phy_init(struct rockchip_typec_phy *tcphy, u8 mode)
reset_control_deassert(tcphy->tcphy_rst);
property_enable(tcphy, &cfg->typec_conn_dir, tcphy->flip);
+ tcphy_dp_aux_set_flip(tcphy);
tcphy_cfg_24m(tcphy);
@@ -685,8 +708,11 @@ static int rockchip_usb3_phy_power_on(struct phy *phy)
if (tcphy->mode == new_mode)
goto unlock_ret;
- if (tcphy->mode == MODE_DISCONNECT)
- tcphy_phy_init(tcphy, new_mode);
+ if (tcphy->mode == MODE_DISCONNECT) {
+ ret = tcphy_phy_init(tcphy, new_mode);
+ if (ret)
+ goto unlock_ret;
+ }
/* wait TCPHY for pipe ready */
for (timeout = 0; timeout < 100; timeout++) {
@@ -760,10 +786,12 @@ static int rockchip_dp_phy_power_on(struct phy *phy)
*/
if (new_mode == MODE_DFP_DP && tcphy->mode != MODE_DISCONNECT) {
tcphy_phy_deinit(tcphy);
- tcphy_phy_init(tcphy, new_mode);
+ ret = tcphy_phy_init(tcphy, new_mode);
} else if (tcphy->mode == MODE_DISCONNECT) {
- tcphy_phy_init(tcphy, new_mode);
+ ret = tcphy_phy_init(tcphy, new_mode);
}
+ if (ret)
+ goto unlock_ret;
ret = readx_poll_timeout(readl, tcphy->base + DP_MODE_CTL,
val, val & DP_MODE_A2, 1000,
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 3cbcb2537657..4307bf0013e1 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -454,6 +454,8 @@ tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
char *name;
name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
+ if (!name)
+ return ERR_PTR(-ENOMEM);
np = of_find_node_by_name(np, name);
kfree(name);
}
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 1778cf4f81c7..82cd8b08d71f 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -100,6 +100,7 @@ config PINCTRL_AMD
tristate "AMD GPIO pin control"
depends on GPIOLIB
select GPIOLIB_IRQCHIP
+ select PINMUX
select PINCONF
select GENERIC_PINCONF
help
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 0944310225db..ff782445dfb7 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -373,16 +373,12 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
unsigned long events;
unsigned offset;
unsigned gpio;
- unsigned int type;
events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4);
events &= mask;
events &= pc->enabled_irq_map[bank];
for_each_set_bit(offset, &events, 32) {
gpio = (32 * bank) + offset;
- /* FIXME: no clue why the code looks up the type here */
- type = pc->irq_type[gpio];
-
generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain,
gpio));
}
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 04e929fd0ffe..fadbca907c7c 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1577,6 +1577,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
struct gpio_chip *chip = &pctrl->chip;
bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
int ret, i, offset;
+ int irq_base;
*chip = chv_gpio_chip;
@@ -1622,7 +1623,18 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
/* Clear all interrupts */
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
- ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
+ if (!need_valid_mask) {
+ irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
+ chip->ngpio, NUMA_NO_NODE);
+ if (irq_base < 0) {
+ dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
+ return irq_base;
+ }
+ } else {
+ irq_base = 0;
+ }
+
+ ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base,
handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add IRQ chip\n");
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 85de30f93a9c..56a8195096a2 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -254,10 +254,12 @@ static int bl_update_status(struct backlight_device *b)
{
struct acpi_device *device = bl_get_data(b);
- if (b->props.power == FB_BLANK_POWERDOWN)
- call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
- else
- call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
+ if (fext) {
+ if (b->props.power == FB_BLANK_POWERDOWN)
+ call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
+ else
+ call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
+ }
return set_lcd_level(device, b->props.brightness);
}
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 315a4be8dc1e..9a68914100ad 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -51,6 +51,8 @@ module_param(mbox_sel, byte, S_IRUGO);
MODULE_PARM_DESC(mbox_sel,
"RIO Messaging MBOX Selection Mask (default: 0x0f = all)");
+static DEFINE_SPINLOCK(tsi721_maint_lock);
+
static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
@@ -124,12 +126,15 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
struct tsi721_dma_desc *bd_ptr;
u32 rd_count, swr_ptr, ch_stat;
+ unsigned long flags;
int i, err = 0;
u32 op = do_wr ? MAINT_WR : MAINT_RD;
if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
return -EINVAL;
+ spin_lock_irqsave(&tsi721_maint_lock, flags);
+
bd_ptr = priv->mdma.bd_base;
rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
@@ -197,7 +202,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
*/
swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
+
err_out:
+ spin_unlock_irqrestore(&tsi721_maint_lock, flags);
return err;
}
diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c
index a3824baca2e5..3ee9af83b638 100644
--- a/drivers/rapidio/rio-access.c
+++ b/drivers/rapidio/rio-access.c
@@ -14,16 +14,8 @@
#include <linux/module.h>
/*
- * These interrupt-safe spinlocks protect all accesses to RIO
- * configuration space and doorbell access.
- */
-static DEFINE_SPINLOCK(rio_config_lock);
-static DEFINE_SPINLOCK(rio_doorbell_lock);
-
-/*
* Wrappers for all RIO configuration access functions. They just check
- * alignment, do locking and call the low-level functions pointed to
- * by rio_mport->ops.
+ * alignment and call the low-level functions pointed to by rio_mport->ops.
*/
#define RIO_8_BAD 0
@@ -44,13 +36,10 @@ int __rio_local_read_config_##size \
(struct rio_mport *mport, u32 offset, type *value) \
{ \
int res; \
- unsigned long flags; \
u32 data = 0; \
if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
- spin_lock_irqsave(&rio_config_lock, flags); \
res = mport->ops->lcread(mport, mport->id, offset, len, &data); \
*value = (type)data; \
- spin_unlock_irqrestore(&rio_config_lock, flags); \
return res; \
}
@@ -67,13 +56,8 @@ int __rio_local_read_config_##size \
int __rio_local_write_config_##size \
(struct rio_mport *mport, u32 offset, type value) \
{ \
- int res; \
- unsigned long flags; \
if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
- spin_lock_irqsave(&rio_config_lock, flags); \
- res = mport->ops->lcwrite(mport, mport->id, offset, len, value);\
- spin_unlock_irqrestore(&rio_config_lock, flags); \
- return res; \
+ return mport->ops->lcwrite(mport, mport->id, offset, len, value);\
}
RIO_LOP_READ(8, u8, 1)
@@ -104,13 +88,10 @@ int rio_mport_read_config_##size \
(struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \
{ \
int res; \
- unsigned long flags; \
u32 data = 0; \
if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
- spin_lock_irqsave(&rio_config_lock, flags); \
res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \
*value = (type)data; \
- spin_unlock_irqrestore(&rio_config_lock, flags); \
return res; \
}
@@ -127,13 +108,9 @@ int rio_mport_read_config_##size \
int rio_mport_write_config_##size \
(struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \
{ \
- int res; \
- unsigned long flags; \
if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
- spin_lock_irqsave(&rio_config_lock, flags); \
- res = mport->ops->cwrite(mport, mport->id, destid, hopcount, offset, len, value); \
- spin_unlock_irqrestore(&rio_config_lock, flags); \
- return res; \
+ return mport->ops->cwrite(mport, mport->id, destid, hopcount, \
+ offset, len, value); \
}
RIO_OP_READ(8, u8, 1)
@@ -162,14 +139,7 @@ EXPORT_SYMBOL_GPL(rio_mport_write_config_32);
*/
int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data)
{
- int res;
- unsigned long flags;
-
- spin_lock_irqsave(&rio_doorbell_lock, flags);
- res = mport->ops->dsend(mport, mport->id, destid, data);
- spin_unlock_irqrestore(&rio_doorbell_lock, flags);
-
- return res;
+ return mport->ops->dsend(mport, mport->id, destid, data);
}
EXPORT_SYMBOL_GPL(rio_mport_send_doorbell);
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index d0e5d6ee882c..e2c1988cd7c0 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -523,7 +523,7 @@ int __init parse_cec_param(char *str)
if (*str == '=')
str++;
- if (!strncmp(str, "cec_disable", 7))
+ if (!strcmp(str, "cec_disable"))
ce_arr.disabled = 1;
else
return 0;
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index df63e44526ac..bf04479456a0 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -109,6 +109,7 @@ config QCOM_Q6V5_PIL
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+ depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
select MFD_SYSCON
select QCOM_RPROC_COMMON
select QCOM_SCM
@@ -120,6 +121,7 @@ config QCOM_WCNSS_PIL
tristate "Qualcomm WCNSS Peripheral Image Loader"
depends on OF && ARCH_QCOM
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+ depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SMEM
select QCOM_MDT_LOADER
select QCOM_RPROC_COMMON
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 612d91403341..633268e9d550 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -264,15 +264,14 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
if (!(att->flags & ATT_OWN))
continue;
- if (b > IMX7D_RPROC_MEM_MAX)
+ if (b >= IMX7D_RPROC_MEM_MAX)
break;
priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
att->sa, att->size);
- if (IS_ERR(priv->mem[b].cpu_addr)) {
+ if (!priv->mem[b].cpu_addr) {
dev_err(dev, "devm_ioremap_resource failed\n");
- err = PTR_ERR(priv->mem[b].cpu_addr);
- return err;
+ return -ENOMEM;
}
priv->mem[b].sys_addr = att->sa;
priv->mem[b].size = att->size;
@@ -296,7 +295,7 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
return err;
}
- if (b > IMX7D_RPROC_MEM_MAX)
+ if (b >= IMX7D_RPROC_MEM_MAX)
break;
priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index e0c393214264..e2baecbb9dd3 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -34,11 +34,12 @@ config RESET_BERLIN
help
This enables the reset controller driver for Marvell Berlin SoCs.
-config RESET_HSDK_V1
- bool "HSDK v1 Reset Driver"
- default n
+config RESET_HSDK
+ bool "Synopsys HSDK Reset Driver"
+ depends on HAS_IOMEM
+ depends on ARC_SOC_HSDK || COMPILE_TEST
help
- This enables the reset controller driver for HSDK v1.
+ This enables the reset controller driver for HSDK board.
config RESET_IMX7
bool "i.MX7 Reset Driver" if COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index d368367110e5..af1c15c330b3 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
-obj-$(CONFIG_RESET_HSDK_V1) += reset-hsdk-v1.o
+obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
diff --git a/drivers/reset/reset-hsdk-v1.c b/drivers/reset/reset-hsdk.c
index bca13e4bf622..8bce391c6943 100644
--- a/drivers/reset/reset-hsdk-v1.c
+++ b/drivers/reset/reset-hsdk.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2017 Synopsys.
*
- * Synopsys HSDKv1 SDP reset driver.
+ * Synopsys HSDK Development platform reset driver.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -18,9 +18,9 @@
#include <linux/slab.h>
#include <linux/types.h>
-#define to_hsdkv1_rst(p) container_of((p), struct hsdkv1_rst, rcdev)
+#define to_hsdk_rst(p) container_of((p), struct hsdk_rst, rcdev)
-struct hsdkv1_rst {
+struct hsdk_rst {
void __iomem *regs_ctl;
void __iomem *regs_rst;
spinlock_t lock;
@@ -49,12 +49,12 @@ static const u32 rst_map[] = {
#define CGU_IP_SW_RESET_RESET BIT(0)
#define SW_RESET_TIMEOUT 10000
-static void hsdkv1_reset_config(struct hsdkv1_rst *rst, unsigned long id)
+static void hsdk_reset_config(struct hsdk_rst *rst, unsigned long id)
{
writel(rst_map[id], rst->regs_ctl + CGU_SYS_RST_CTRL);
}
-static int hsdkv1_reset_do(struct hsdkv1_rst *rst)
+static int hsdk_reset_do(struct hsdk_rst *rst)
{
u32 reg;
@@ -69,28 +69,28 @@ static int hsdkv1_reset_do(struct hsdkv1_rst *rst)
!(reg & CGU_IP_SW_RESET_RESET), 5, SW_RESET_TIMEOUT);
}
-static int hsdkv1_reset_reset(struct reset_controller_dev *rcdev,
+static int hsdk_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct hsdkv1_rst *rst = to_hsdkv1_rst(rcdev);
+ struct hsdk_rst *rst = to_hsdk_rst(rcdev);
unsigned long flags;
int ret;
spin_lock_irqsave(&rst->lock, flags);
- hsdkv1_reset_config(rst, id);
- ret = hsdkv1_reset_do(rst);
+ hsdk_reset_config(rst, id);
+ ret = hsdk_reset_do(rst);
spin_unlock_irqrestore(&rst->lock, flags);
return ret;
}
-static const struct reset_control_ops hsdkv1_reset_ops = {
- .reset = hsdkv1_reset_reset,
+static const struct reset_control_ops hsdk_reset_ops = {
+ .reset = hsdk_reset_reset,
};
-static int hsdkv1_reset_probe(struct platform_device *pdev)
+static int hsdk_reset_probe(struct platform_device *pdev)
{
- struct hsdkv1_rst *rst;
+ struct hsdk_rst *rst;
struct resource *mem;
rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL);
@@ -110,7 +110,7 @@ static int hsdkv1_reset_probe(struct platform_device *pdev)
spin_lock_init(&rst->lock);
rst->rcdev.owner = THIS_MODULE;
- rst->rcdev.ops = &hsdkv1_reset_ops;
+ rst->rcdev.ops = &hsdk_reset_ops;
rst->rcdev.of_node = pdev->dev.of_node;
rst->rcdev.nr_resets = HSDK_MAX_RESETS;
rst->rcdev.of_reset_n_cells = 1;
@@ -118,20 +118,20 @@ static int hsdkv1_reset_probe(struct platform_device *pdev)
return reset_controller_register(&rst->rcdev);
}
-static const struct of_device_id hsdkv1_reset_dt_match[] = {
- { .compatible = "snps,hsdk-v1.0-reset" },
+static const struct of_device_id hsdk_reset_dt_match[] = {
+ { .compatible = "snps,hsdk-reset" },
{ },
};
-static struct platform_driver hsdkv1_reset_driver = {
- .probe = hsdkv1_reset_probe,
+static struct platform_driver hsdk_reset_driver = {
+ .probe = hsdk_reset_probe,
.driver = {
- .name = "hsdk-v1.0-reset",
- .of_match_table = hsdkv1_reset_dt_match,
+ .name = "hsdk-reset",
+ .of_match_table = hsdk_reset_dt_match,
},
};
-builtin_platform_driver(hsdkv1_reset_driver);
+builtin_platform_driver(hsdk_reset_driver);
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");
-MODULE_DESCRIPTION("Synopsys HSDKv1 SDP reset driver");
+MODULE_DESCRIPTION("Synopsys HSDK SDP reset driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index c60904ff40b8..3907bbc9c6cf 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -40,8 +40,9 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
struct socfpga_reset_data *data = container_of(rcdev,
struct socfpga_reset_data,
rcdev);
- int bank = id / BITS_PER_LONG;
- int offset = id % BITS_PER_LONG;
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
unsigned long flags;
u32 reg;
@@ -61,8 +62,9 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
struct socfpga_reset_data,
rcdev);
- int bank = id / BITS_PER_LONG;
- int offset = id % BITS_PER_LONG;
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
unsigned long flags;
u32 reg;
@@ -81,8 +83,9 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev,
{
struct socfpga_reset_data *data = container_of(rcdev,
struct socfpga_reset_data, rcdev);
- int bank = id / BITS_PER_LONG;
- int offset = id % BITS_PER_LONG;
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
u32 reg;
reg = readl(data->membase + (bank * BANK_INCREMENT));
@@ -132,7 +135,7 @@ static int socfpga_reset_probe(struct platform_device *pdev)
spin_lock_init(&data->lock);
data->rcdev.owner = THIS_MODULE;
- data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG;
+ data->rcdev.nr_resets = NR_BANKS * (sizeof(u32) * BITS_PER_BYTE);
data->rcdev.ops = &socfpga_reset_ops;
data->rcdev.of_node = pdev->dev.of_node;
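
The socfpga fix above derives the bank and bit offset from the 32-bit width of the reset registers rather than BITS_PER_LONG, which is 64 on 64-bit kernels and would mis-index every bank past the first. A standalone illustration of the corrected arithmetic:

    #include <stdio.h>

    #define BITS_PER_BYTE   8

    static void id_to_bank_offset(int id, int *bank, int *offset)
    {
        const int reg_width = (int)sizeof(unsigned int);    /* 4-byte MMIO regs */
        const int bits_per_reg = reg_width * BITS_PER_BYTE; /* always 32 */

        *bank = id / bits_per_reg;
        *offset = id % bits_per_reg;
    }

    int main(void)
    {
        int bank, offset;

        id_to_bank_offset(35, &bank, &offset);
        /* reset line 35 lives in bank 1, bit 3 regardless of long size */
        printf("bank=%d offset=%d\n", bank, offset);
        return 0;
    }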
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 5a5e927ea50f..5dcc9bf1c5bc 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -635,19 +635,18 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
unsigned long flags;
intent = kzalloc(sizeof(*intent), GFP_KERNEL);
-
if (!intent)
return NULL;
intent->data = kzalloc(size, GFP_KERNEL);
if (!intent->data)
- return NULL;
+ goto free_intent;
spin_lock_irqsave(&channel->intent_lock, flags);
ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
if (ret < 0) {
spin_unlock_irqrestore(&channel->intent_lock, flags);
- return NULL;
+ goto free_data;
}
spin_unlock_irqrestore(&channel->intent_lock, flags);
@@ -656,6 +655,12 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
intent->reuse = reuseable;
return intent;
+
+free_data:
+ kfree(intent->data);
+free_intent:
+ kfree(intent);
+ return NULL;
}
static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
@@ -1197,7 +1202,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
if (ret)
- return ret;
+ goto unlock;
ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ);
if (!ret) {
@@ -1207,6 +1212,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
ret = channel->intent_req_result ? 0 : -ECANCELED;
}
+unlock:
mutex_unlock(&channel->intent_req_lock);
return ret;
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index ea19b4ff87a2..29f35e29d480 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1644,7 +1644,9 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
dasd_schedule_device_bh(device);
if (device->block) {
dasd_schedule_block_bh(device->block);
- blk_mq_run_hw_queues(device->block->request_queue, true);
+ if (device->block->request_queue)
+ blk_mq_run_hw_queues(device->block->request_queue,
+ true);
}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
@@ -3759,7 +3761,9 @@ int dasd_generic_path_operational(struct dasd_device *device)
dasd_schedule_device_bh(device);
if (device->block) {
dasd_schedule_block_bh(device->block);
- blk_mq_run_hw_queues(device->block->request_queue, true);
+ if (device->block->request_queue)
+ blk_mq_run_hw_queues(device->block->request_queue,
+ true);
}
if (!device->stopped)
@@ -4025,7 +4029,9 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
if (device->block) {
dasd_schedule_block_bh(device->block);
- blk_mq_run_hw_queues(device->block->request_queue, true);
+ if (device->block->request_queue)
+ blk_mq_run_hw_queues(device->block->request_queue,
+ true);
}
clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 2e7fd966c515..eb51893c74a4 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -249,7 +249,7 @@ static void scm_request_requeue(struct scm_request *scmrq)
static void scm_request_finish(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
- int *error;
+ blk_status_t *error;
int i;
for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
@@ -415,7 +415,7 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
static void scm_blk_request_done(struct request *req)
{
- int *error = blk_mq_rq_to_pdu(req);
+ blk_status_t *error = blk_mq_rq_to_pdu(req);
blk_mq_end_request(req, *error);
}
@@ -450,7 +450,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
atomic_set(&bdev->queued_reqs, 0);
bdev->tag_set.ops = &scm_mq_ops;
- bdev->tag_set.cmd_size = sizeof(int);
+ bdev->tag_set.cmd_size = sizeof(blk_status_t);
bdev->tag_set.nr_hw_queues = nr_requests;
bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 489b583f263d..e5c32f4b5287 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1225,10 +1225,16 @@ static int device_is_disconnected(struct ccw_device *cdev)
static int recovery_check(struct device *dev, void *data)
{
struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch;
int *redo = data;
spin_lock_irq(cdev->ccwlock);
switch (cdev->private->state) {
+ case DEV_STATE_ONLINE:
+ sch = to_subchannel(cdev->dev.parent);
+ if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
+ break;
+ /* fall through */
case DEV_STATE_DISCONNECTED:
CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
cdev->private->dev_id.ssid,
@@ -1260,7 +1266,7 @@ static void recovery_work_func(struct work_struct *unused)
}
spin_unlock_irq(&recovery_lock);
} else
- CIO_MSG_EVENT(4, "recovery: end\n");
+ CIO_MSG_EVENT(3, "recovery: end\n");
}
static DECLARE_WORK(recovery_work, recovery_work_func);
@@ -1274,11 +1280,11 @@ static void recovery_func(unsigned long data)
schedule_work(&recovery_work);
}
-static void ccw_device_schedule_recovery(void)
+void ccw_device_schedule_recovery(void)
{
unsigned long flags;
- CIO_MSG_EVENT(4, "recovery: schedule\n");
+ CIO_MSG_EVENT(3, "recovery: schedule\n");
spin_lock_irqsave(&recovery_lock, flags);
if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
recovery_phase = 0;
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index ec497af99dd8..69cb70f080a5 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -134,6 +134,7 @@ void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);
void ccw_device_set_timeout(struct ccw_device *, int);
+void ccw_device_schedule_recovery(void);
/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 12016e32e519..f98ea674c3d8 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -476,6 +476,17 @@ static void create_fake_irb(struct irb *irb, int type)
}
}
+static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;
+
+ if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
+ ccw_device_schedule_recovery();
+
+ cdev->private->path_broken_mask = broken_paths;
+}
+
void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
struct subchannel *sch;
@@ -508,6 +519,7 @@ callback:
memset(&cdev->private->irb, 0, sizeof(struct irb));
}
ccw_device_report_path_events(cdev);
+ ccw_device_handle_broken_paths(cdev);
break;
case -ETIME:
case -EUSERS:
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 220f49145b2f..9a1b56b2df3e 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -131,6 +131,8 @@ struct ccw_device_private {
not operable */
u8 path_gone_mask; /* mask of paths, that became unavailable */
u8 path_new_mask; /* mask of paths, that became available */
+ u8 path_broken_mask; /* mask of paths, which were found to be
+ unusable */
struct {
unsigned int fast:1; /* post with "channel end" */
unsigned int repall:1; /* report every interrupt status */
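The recovery logic added above boils down to one bitmask expression: a path that is physically available and operational (pam & opm) but not verified (vpm) counts as broken. A minimal user-space sketch of that expression, with made-up example masks (one bit per channel path); it only illustrates the arithmetic, not the CIO state machine:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t pam = 0xC0;	/* paths physically available (made-up mask) */
	uint8_t opm = 0xC0;	/* paths considered operational (made-up mask) */
	uint8_t vpm = 0x80;	/* paths that actually verified (made-up mask) */

	/* Same expression as ccw_device_handle_broken_paths() above. */
	uint8_t broken = (pam & opm) ^ vpm;

	printf("broken path mask: 0x%02x\n", broken);	/* prints 0x40 */
	return 0;
}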
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a64285ab0728..af3e4d3f9735 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -699,13 +699,13 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
int status;
dresp = (struct aac_mount *) fib_data(fibptr);
- if (!(fibptr->dev->supplement_adapter_info.supported_options2 &
- AAC_OPTION_VARIABLE_BLOCK_SIZE))
+ if (!aac_supports_2T(fibptr->dev)) {
dresp->mnt[0].capacityhigh = 0;
- if ((le32_to_cpu(dresp->status) != ST_OK) ||
- (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
- _aac_probe_container2(context, fibptr);
- return;
+ if ((le32_to_cpu(dresp->status) == ST_OK) &&
+ (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
+ _aac_probe_container2(context, fibptr);
+ return;
+ }
}
scsicmd = (struct scsi_cmnd *) context;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 92fabf2b0c24..403a639574e5 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -2701,6 +2701,11 @@ static inline int aac_is_src(struct aac_dev *dev)
return 0;
}
+static inline int aac_supports_2T(struct aac_dev *dev)
+{
+ return (dev->adapter_info.options & AAC_OPT_NEW_COMM_64);
+}
+
char * get_container_type(unsigned type);
extern int numacb;
extern char aac_driver_version[];
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 87cc4a93e637..62beb2596466 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -906,12 +906,14 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
bus = aac_logical_to_phys(scmd_channel(cmd));
cid = scmd_id(cmd);
- info = &aac->hba_map[bus][cid];
- if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
- info->devtype != AAC_DEVTYPE_NATIVE_RAW)
+
+ if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
return FAILED;
- if (info->reset_state > 0)
+ info = &aac->hba_map[bus][cid];
+
+ if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
+ info->reset_state > 0)
return FAILED;
pr_err("%s: Host adapter reset request. SCSI hang ?\n",
@@ -962,12 +964,14 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
bus = aac_logical_to_phys(scmd_channel(cmd));
cid = scmd_id(cmd);
- info = &aac->hba_map[bus][cid];
- if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
- info->devtype != AAC_DEVTYPE_NATIVE_RAW)
+
+ if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
return FAILED;
- if (info->reset_state > 0)
+ info = &aac->hba_map[bus][cid];
+
+ if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
+ info->reset_state > 0)
return FAILED;
pr_err("%s: Host adapter reset request. SCSI hang ?\n",
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 48c2b2b34b72..0c9361c87ec8 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -740,6 +740,8 @@ static void aac_send_iop_reset(struct aac_dev *dev)
aac_set_intx_mode(dev);
src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
+
+ msleep(5000);
}
static void aac_send_hardware_soft_reset(struct aac_dev *dev)
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 690816f3c6af..421fe869a11e 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2725,9 +2725,9 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
* Params : SCpnt - command causing reset
* Returns : one of SCSI_RESET_ macros
*/
-int acornscsi_host_reset(struct Scsi_Host *shpnt)
+int acornscsi_host_reset(struct scsi_cmnd *SCpnt)
{
- AS_Host *host = (AS_Host *)shpnt->hostdata;
+ AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
struct scsi_cmnd *SCptr;
host->stats.resets += 1;
@@ -2741,7 +2741,7 @@ int acornscsi_host_reset(struct Scsi_Host *shpnt)
printk(KERN_WARNING "acornscsi_reset: ");
print_sbic_status(asr, ssr, host->scsi.phase);
- for (devidx = 0; devidx < 9; devidx ++) {
+ for (devidx = 0; devidx < 9; devidx++)
acornscsi_dumplog(host, devidx);
}
#endif
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 785fb42f6650..2799a6b08f73 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3767,7 +3767,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
*/
if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
pr_err("write_pending failed since: %d\n", vscsi->flags);
- return 0;
+ return -EIO;
}
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 520325867e2b..31d31aad3de1 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -383,11 +383,11 @@ static void fc_rport_work(struct work_struct *work)
fc_rport_enter_flogi(rdata);
mutex_unlock(&rdata->rp_mutex);
} else {
+ mutex_unlock(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "work delete\n");
mutex_lock(&lport->disc.disc_mutex);
list_del_rcu(&rdata->peers);
mutex_unlock(&lport->disc.disc_mutex);
- mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, fc_rport_destroy);
}
} else {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index bd4605a34f54..f8dc1601efd5 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1728,7 +1728,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
reason = FAILURE_SESSION_IN_RECOVERY;
- sc->result = DID_REQUEUE;
+ sc->result = DID_REQUEUE << 16;
goto fault;
}
@@ -2851,9 +2851,6 @@ EXPORT_SYMBOL_GPL(iscsi_session_setup);
/**
* iscsi_session_teardown - destroy session, host, and cls_session
* @cls_session: iscsi session
- *
- * The driver must have called iscsi_remove_session before
- * calling this.
*/
void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
{
@@ -2863,6 +2860,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
iscsi_pool_free(&session->cmdpool);
+ iscsi_remove_session(cls_session);
+
kfree(session->password);
kfree(session->password_in);
kfree(session->username);
@@ -2877,7 +2876,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
kfree(session->portal_type);
kfree(session->discovery_parent_type);
- iscsi_destroy_session(cls_session);
+ iscsi_free_session(cls_session);
+
iscsi_host_dec_session_cnt(shost);
module_put(owner);
}
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7e7ae786121b..100bc4c8798d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6131,6 +6131,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
"Extents and RPI headers enabled.\n");
}
mempool_free(mboxq, phba->mbox_mem_pool);
+ rc = -EIO;
goto out_free_bsmbx;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 79ba3ce063a4..23bdb1ca106e 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -884,7 +884,7 @@ out_err:
wcqe->total_data_placed);
nCmd->transferred_length = 0;
nCmd->rcv_rsplen = 0;
- nCmd->status = NVME_SC_FC_TRANSPORT_ERROR;
+ nCmd->status = NVME_SC_INTERNAL;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 1f59e7a74c7b..6b33a1f24f56 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -180,7 +180,7 @@ static void qla_nvme_sp_done(void *ptr, int res)
goto rel;
if (unlikely(res == QLA_FUNCTION_FAILED))
- fd->status = NVME_SC_FC_TRANSPORT_ERROR;
+ fd->status = NVME_SC_INTERNAL;
else
fd->status = 0;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5b2437a5ea44..937209805baf 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3175,6 +3175,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+ INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
+
if (ha->mqenable) {
bool mq = false;
bool startit = false;
@@ -3223,7 +3225,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
*/
qla2xxx_wake_dpc(base_vha);
- INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 38942050b265..dab876c65473 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -580,7 +580,8 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
if (sshdr.asc == 0x20 || /* Invalid command operation code */
sshdr.asc == 0x21 || /* Logical block address out of range */
sshdr.asc == 0x24 || /* Invalid field in cdb */
- sshdr.asc == 0x26) { /* Parameter value invalid */
+ sshdr.asc == 0x26 || /* Parameter value invalid */
+ sshdr.asc == 0x27) { /* Write protected */
set_host_byte(scmd, DID_TARGET_FAILURE);
}
return SUCCESS;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e7818afeda2b..15590a063ad9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -956,6 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_NO_DIF)
sdev->no_dif = 1;
+ if (*bflags & BLIST_UNMAP_LIMIT_WS)
+ sdev->unmap_limit_for_ws = 1;
+
sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
if (*bflags & BLIST_TRY_VPD_PAGES)
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index bf53356f41f0..f796bd61f3f0 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1376,13 +1376,19 @@ static void __scsi_remove_target(struct scsi_target *starget)
spin_lock_irqsave(shost->host_lock, flags);
restart:
list_for_each_entry(sdev, &shost->__devices, siblings) {
+ /*
+ * We cannot call scsi_device_get() here, as
+ * we might've been called from rmmod() causing
+ * scsi_device_get() to fail the module_is_live()
+ * check.
+ */
if (sdev->channel != starget->channel ||
sdev->id != starget->id ||
- scsi_device_get(sdev))
+ !get_device(&sdev->sdev_gendev))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_remove_device(sdev);
- scsi_device_put(sdev);
+ put_device(&sdev->sdev_gendev);
spin_lock_irqsave(shost->host_lock, flags);
goto restart;
}
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 3c6bc0081fcb..8c46a6d536af 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2739,7 +2739,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
list_for_each_entry(rport, &fc_host->rports, peers) {
- if ((rport->port_state == FC_PORTSTATE_BLOCKED) &&
+ if ((rport->port_state == FC_PORTSTATE_BLOCKED ||
+ rport->port_state == FC_PORTSTATE_NOTPRESENT) &&
(rport->channel == channel)) {
switch (fc_host->tgtid_bind_type) {
@@ -2876,7 +2877,6 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
memcpy(&rport->port_name, &ids->port_name,
sizeof(rport->port_name));
rport->port_id = ids->port_id;
- rport->roles = ids->roles;
rport->port_state = FC_PORTSTATE_ONLINE;
rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
@@ -2885,15 +2885,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
fci->f->dd_fcrport_size);
spin_unlock_irqrestore(shost->host_lock, flags);
- if (ids->roles & FC_PORT_ROLE_FCP_TARGET) {
- scsi_target_unblock(&rport->dev, SDEV_RUNNING);
-
- /* initiate a scan of the target */
- spin_lock_irqsave(shost->host_lock, flags);
- rport->flags |= FC_RPORT_SCAN_PENDING;
- scsi_queue_work(shost, &rport->scan_work);
- spin_unlock_irqrestore(shost->host_lock, flags);
- }
+ fc_remote_port_rolechg(rport, ids->roles);
return rport;
}
}
@@ -3328,6 +3320,9 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
{
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+ if (WARN_ON_ONCE(!rport))
+ return FAST_IO_FAIL;
+
return fc_block_rport(rport);
}
EXPORT_SYMBOL(fc_block_scsi_eh);
@@ -3571,7 +3566,7 @@ fc_vport_sched_delete(struct work_struct *work)
static enum blk_eh_timer_return
fc_bsg_job_timeout(struct request *req)
{
- struct bsg_job *job = (void *) req->special;
+ struct bsg_job *job = blk_mq_rq_to_pdu(req);
struct Scsi_Host *shost = fc_bsg_to_shost(job);
struct fc_rport *rport = fc_bsg_to_rport(job);
struct fc_internal *i = to_fc_internal(shost->transportt);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 8934f19bce8e..7404d26895f5 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2211,22 +2211,6 @@ void iscsi_free_session(struct iscsi_cls_session *session)
EXPORT_SYMBOL_GPL(iscsi_free_session);
/**
- * iscsi_destroy_session - destroy iscsi session
- * @session: iscsi_session
- *
- * Can be called by a LLD or iscsi_transport. There must not be
- * any running connections.
- */
-int iscsi_destroy_session(struct iscsi_cls_session *session)
-{
- iscsi_remove_session(session);
- ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n");
- iscsi_free_session(session);
- return 0;
-}
-EXPORT_SYMBOL_GPL(iscsi_destroy_session);
-
-/**
* iscsi_create_conn - create iscsi class connection
* @session: iscsi cls session
* @dd_size: private driver data size
@@ -3689,7 +3673,7 @@ iscsi_if_rx(struct sk_buff *skb)
uint32_t group;
nlh = nlmsg_hdr(skb);
- if (nlh->nlmsg_len < sizeof(*nlh) ||
+ if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
skb->len < nlh->nlmsg_len) {
break;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 11c1738c2100..d175c5c5ccf8 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -715,13 +715,21 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
break;
case SD_LBP_WS16:
- max_blocks = min_not_zero(sdkp->max_ws_blocks,
- (u32)SD_MAX_WS16_BLOCKS);
+ if (sdkp->device->unmap_limit_for_ws)
+ max_blocks = sdkp->max_unmap_blocks;
+ else
+ max_blocks = sdkp->max_ws_blocks;
+
+ max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
break;
case SD_LBP_WS10:
- max_blocks = min_not_zero(sdkp->max_ws_blocks,
- (u32)SD_MAX_WS10_BLOCKS);
+ if (sdkp->device->unmap_limit_for_ws)
+ max_blocks = sdkp->max_unmap_blocks;
+ else
+ max_blocks = sdkp->max_ws_blocks;
+
+ max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
break;
case SD_LBP_ZERO:
@@ -2915,8 +2923,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
sd_config_discard(sdkp, SD_LBP_WS16);
else if (sdkp->lbpws10)
sd_config_discard(sdkp, SD_LBP_WS10);
- else if (sdkp->lbpu && sdkp->max_unmap_blocks)
- sd_config_discard(sdkp, SD_LBP_UNMAP);
else
sd_config_discard(sdkp, SD_LBP_DISABLE);
}
@@ -3101,8 +3107,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_security(sdkp, buffer);
}
- sdkp->first_scan = 0;
-
/*
* We now have all cache related info, determine how we deal
* with flush requests.
@@ -3117,7 +3121,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
/*
- * Use the device's preferred I/O size for reads and writes
+ * Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, or
* garbage.
*/
@@ -3131,8 +3135,19 @@ static int sd_revalidate_disk(struct gendisk *disk)
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
- /* Combine with controller limits */
- q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
+ /* Do not exceed controller limit */
+ rw_max = min(rw_max, queue_max_hw_sectors(q));
+
+ /*
+ * Only update max_sectors if previously unset or if the current value
+ * exceeds the capabilities of the hardware.
+ */
+ if (sdkp->first_scan ||
+ q->limits.max_sectors > q->limits.max_dev_sectors ||
+ q->limits.max_sectors > q->limits.max_hw_sectors)
+ q->limits.max_sectors = rw_max;
+
+ sdkp->first_scan = 0;
set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
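The sd.c change above alters when max_sectors may be overwritten during revalidation. A minimal user-space model of that policy, under the assumption (stated in the new comment) that a previously tuned value should survive a rescan unless it exceeds the hardware limits; all numbers are made up:

#include <stdio.h>
#include <stdbool.h>

/* Mirrors the condition added above: overwrite only on the first scan or
 * when the current value no longer fits the device/controller limits. */
static unsigned int pick_max_sectors(bool first_scan, unsigned int cur,
				     unsigned int dev_max, unsigned int hw_max,
				     unsigned int rw_max)
{
	if (first_scan || cur > dev_max || cur > hw_max)
		return rw_max;
	return cur;			/* keep the existing setting */
}

int main(void)
{
	printf("%u\n", pick_max_sectors(true, 0, 65535, 65535, 2048));		/* 2048 */
	printf("%u\n", pick_max_sectors(false, 512, 65535, 65535, 2048));	/* 512 kept */
	printf("%u\n", pick_max_sectors(false, 131072, 65535, 65535, 2048));	/* clamped to 2048 */
	return 0;
}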
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cf0e71db9e51..0419c2298eab 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -828,6 +828,39 @@ static int max_sectors_bytes(struct request_queue *q)
return max_sectors << 9;
}
+static void
+sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
+{
+ Sg_request *srp;
+ int val;
+ unsigned int ms;
+
+ val = 0;
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
+ if (val >= SG_MAX_QUEUE)
+ break;
+ rinfo[val].req_state = srp->done + 1;
+ rinfo[val].problem =
+ srp->header.masked_status &
+ srp->header.host_status &
+ srp->header.driver_status;
+ if (srp->done)
+ rinfo[val].duration =
+ srp->header.duration;
+ else {
+ ms = jiffies_to_msecs(jiffies);
+ rinfo[val].duration =
+ (ms > srp->header.duration) ?
+ (ms - srp->header.duration) : 0;
+ }
+ rinfo[val].orphan = srp->orphan;
+ rinfo[val].sg_io_owned = srp->sg_io_owned;
+ rinfo[val].pack_id = srp->header.pack_id;
+ rinfo[val].usr_ptr = srp->header.usr_ptr;
+ val++;
+ }
+}
+
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
@@ -1012,38 +1045,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -EFAULT;
else {
sg_req_info_t *rinfo;
- unsigned int ms;
- rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
- GFP_KERNEL);
+ rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
+ GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- val = 0;
- list_for_each_entry(srp, &sfp->rq_list, entry) {
- if (val >= SG_MAX_QUEUE)
- break;
- memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
- rinfo[val].req_state = srp->done + 1;
- rinfo[val].problem =
- srp->header.masked_status &
- srp->header.host_status &
- srp->header.driver_status;
- if (srp->done)
- rinfo[val].duration =
- srp->header.duration;
- else {
- ms = jiffies_to_msecs(jiffies);
- rinfo[val].duration =
- (ms > srp->header.duration) ?
- (ms - srp->header.duration) : 0;
- }
- rinfo[val].orphan = srp->orphan;
- rinfo[val].sg_io_owned = srp->sg_io_owned;
- rinfo[val].pack_id = srp->header.pack_id;
- rinfo[val].usr_ptr = srp->header.usr_ptr;
- val++;
- }
+ sg_fill_request_table(sfp, rinfo);
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
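The new sg_fill_request_table() fills a fixed-size array of SG_MAX_QUEUE entries, so the walk over rq_list has to stop before the index reaches that bound. A minimal user-space sketch of the bounds check; SG_MAX_QUEUE is 16 in sg.c, but the value here is only for illustration:

#include <stdio.h>

#define SG_MAX_QUEUE 16

int main(void)
{
	int rinfo[SG_MAX_QUEUE];
	int val;

	/* Valid slots are 0 .. SG_MAX_QUEUE - 1; a '>' test here would let
	 * one extra, out-of-bounds write through. */
	for (val = 0; val < 32; val++) {
		if (val >= SG_MAX_QUEUE)
			break;
		rinfo[val] = val;
	}

	printf("filled %d of %d slots, last = %d\n",
	       val, SG_MAX_QUEUE, rinfo[val - 1]);	/* 16 of 16, last = 15 */
	return 0;
}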
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 6c7d7a460689..568e1c65aa82 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -99,11 +99,6 @@
/* A3700_SPI_IF_TIME_REG */
#define A3700_SPI_CLK_CAPT_EDGE BIT(7)
-/* Flags and macros for struct a3700_spi */
-#define A3700_INSTR_CNT 1
-#define A3700_ADDR_CNT 3
-#define A3700_DUMMY_CNT 1
-
struct a3700_spi {
struct spi_master *master;
void __iomem *base;
@@ -117,9 +112,6 @@ struct a3700_spi {
u8 byte_len;
u32 wait_mask;
struct completion done;
- u32 addr_cnt;
- u32 instr_cnt;
- size_t hdr_cnt;
};
static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset)
@@ -161,7 +153,7 @@ static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi,
}
static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
- unsigned int pin_mode)
+ unsigned int pin_mode, bool receiving)
{
u32 val;
@@ -177,6 +169,9 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
break;
case SPI_NBITS_QUAD:
val |= A3700_SPI_DATA_PIN1;
+ /* RX during address reception uses 4-pin */
+ if (receiving)
+ val |= A3700_SPI_ADDR_PIN;
break;
default:
dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode);
@@ -392,7 +387,8 @@ static bool a3700_spi_wait_completion(struct spi_device *spi)
spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
- return true;
+ /* Timeout was reached */
+ return false;
}
static bool a3700_spi_transfer_wait(struct spi_device *spi,
@@ -446,59 +442,43 @@ static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
static void a3700_spi_header_set(struct a3700_spi *a3700_spi)
{
- u32 instr_cnt = 0, addr_cnt = 0, dummy_cnt = 0;
+ unsigned int addr_cnt;
u32 val = 0;
/* Clear the header registers */
spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0);
spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0);
spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
/* Set header counters */
if (a3700_spi->tx_buf) {
- if (a3700_spi->buf_len <= a3700_spi->instr_cnt) {
- instr_cnt = a3700_spi->buf_len;
- } else if (a3700_spi->buf_len <= (a3700_spi->instr_cnt +
- a3700_spi->addr_cnt)) {
- instr_cnt = a3700_spi->instr_cnt;
- addr_cnt = a3700_spi->buf_len - instr_cnt;
- } else if (a3700_spi->buf_len <= a3700_spi->hdr_cnt) {
- instr_cnt = a3700_spi->instr_cnt;
- addr_cnt = a3700_spi->addr_cnt;
- /* Need to handle the normal write case with 1 byte
- * data
- */
- if (!a3700_spi->tx_buf[instr_cnt + addr_cnt])
- dummy_cnt = a3700_spi->buf_len - instr_cnt -
- addr_cnt;
+ /*
+ * When the TX data is not 4-byte aligned, unexpected bytes are shifted
+ * out of the SPI output register, since it always shifts out whole
+ * 4-byte words. This might cause an incorrect transaction with some
+ * devices. To avoid that, use the SPI header count feature to transfer
+ * up to 3 leading bytes first, so that the remaining data is 4-byte
+ * aligned.
+ */
+ addr_cnt = a3700_spi->buf_len % 4;
+ if (addr_cnt) {
+ val = (addr_cnt & A3700_SPI_ADDR_CNT_MASK)
+ << A3700_SPI_ADDR_CNT_BIT;
+ spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
+
+ /* Update the buffer length to be transferred */
+ a3700_spi->buf_len -= addr_cnt;
+
+ /* transfer 1~3 bytes through address count */
+ val = 0;
+ while (addr_cnt--) {
+ val = (val << 8) | a3700_spi->tx_buf[0];
+ a3700_spi->tx_buf++;
+ }
+ spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
}
- val |= ((instr_cnt & A3700_SPI_INSTR_CNT_MASK)
- << A3700_SPI_INSTR_CNT_BIT);
- val |= ((addr_cnt & A3700_SPI_ADDR_CNT_MASK)
- << A3700_SPI_ADDR_CNT_BIT);
- val |= ((dummy_cnt & A3700_SPI_DUMMY_CNT_MASK)
- << A3700_SPI_DUMMY_CNT_BIT);
}
- spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
-
- /* Update the buffer length to be transferred */
- a3700_spi->buf_len -= (instr_cnt + addr_cnt + dummy_cnt);
-
- /* Set Instruction */
- val = 0;
- while (instr_cnt--) {
- val = (val << 8) | a3700_spi->tx_buf[0];
- a3700_spi->tx_buf++;
- }
- spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, val);
-
- /* Set Address */
- val = 0;
- while (addr_cnt--) {
- val = (val << 8) | a3700_spi->tx_buf[0];
- a3700_spi->tx_buf++;
- }
- spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
}
static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
@@ -512,35 +492,12 @@ static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
{
u32 val;
- int i = 0;
while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
- val = 0;
- if (a3700_spi->buf_len >= 4) {
- val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
- spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
-
- a3700_spi->buf_len -= 4;
- a3700_spi->tx_buf += 4;
- } else {
- /*
- * If the remained buffer length is less than 4-bytes,
- * we should pad the write buffer with all ones. So that
- * it avoids overwrite the unexpected bytes following
- * the last one.
- */
- val = GENMASK(31, 0);
- while (a3700_spi->buf_len) {
- val &= ~(0xff << (8 * i));
- val |= *a3700_spi->tx_buf++ << (8 * i);
- i++;
- a3700_spi->buf_len--;
-
- spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG,
- val);
- }
- break;
- }
+ val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+ a3700_spi->buf_len -= 4;
+ a3700_spi->tx_buf += 4;
}
return 0;
@@ -645,15 +602,18 @@ static int a3700_spi_transfer_one(struct spi_master *master,
a3700_spi->rx_buf = xfer->rx_buf;
a3700_spi->buf_len = xfer->len;
- /* SPI transfer headers */
- a3700_spi_header_set(a3700_spi);
-
if (xfer->tx_buf)
nbits = xfer->tx_nbits;
else if (xfer->rx_buf)
nbits = xfer->rx_nbits;
- a3700_spi_pin_mode_set(a3700_spi, nbits);
+ a3700_spi_pin_mode_set(a3700_spi, nbits, xfer->rx_buf ? true : false);
+
+ /* Flush the FIFOs */
+ a3700_spi_fifo_flush(a3700_spi);
+
+ /* Transfer first bytes of data when buffer is not 4-byte aligned */
+ a3700_spi_header_set(a3700_spi);
if (xfer->rx_buf) {
/* Set read data length */
@@ -733,16 +693,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
dev_err(&spi->dev, "wait wfifo empty timed out\n");
return -ETIMEDOUT;
}
- } else {
- /*
- * If the instruction in SPI_INSTR does not require data
- * to be written to the SPI device, wait until SPI_RDY
- * is 1 for the SPI interface to be in idle.
- */
- if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
- dev_err(&spi->dev, "wait xfer ready timed out\n");
- return -ETIMEDOUT;
- }
+ }
+
+ if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
+ dev_err(&spi->dev, "wait xfer ready timed out\n");
+ return -ETIMEDOUT;
}
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
@@ -834,10 +789,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
memset(spi, 0, sizeof(struct a3700_spi));
spi->master = master;
- spi->instr_cnt = A3700_INSTR_CNT;
- spi->addr_cnt = A3700_ADDR_CNT;
- spi->hdr_cnt = A3700_INSTR_CNT + A3700_ADDR_CNT +
- A3700_DUMMY_CNT;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spi->base = devm_ioremap_resource(dev, res);
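The rewritten a3700_spi_header_set() sends buf_len % 4 leading bytes through the controller's header/address counter so that the remaining payload is a whole number of 32-bit FIFO words. A user-space sketch of that packing with a made-up 7-byte buffer; it only shows the byte arithmetic, not the register writes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t buf[] = { 0x9f, 0x01, 0x02, 0xaa, 0xbb, 0xcc, 0xdd };
	size_t len = sizeof(buf);
	size_t addr_cnt = len % 4;	/* bytes to send via the header counter */
	uint32_t hdr = 0;
	size_t i;

	for (i = 0; i < addr_cnt; i++)
		hdr = (hdr << 8) | buf[i];	/* same packing as the driver loop */

	printf("header bytes: %zu, header word: 0x%08x, FIFO bytes: %zu\n",
	       addr_cnt, (unsigned int)hdr, len - addr_cnt);	/* 3, 0x009f0102, 4 */
	return 0;
}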
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 6ef6c44f39f5..a172ab299e80 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1250,7 +1250,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
goto qspi_probe_err;
}
} else {
- goto qspi_probe_err;
+ goto qspi_resource_err;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
@@ -1272,7 +1272,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->base[CHIP_SELECT])) {
ret = PTR_ERR(qspi->base[CHIP_SELECT]);
- goto qspi_probe_err;
+ goto qspi_resource_err;
}
}
@@ -1280,7 +1280,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
GFP_KERNEL);
if (!qspi->dev_ids) {
ret = -ENOMEM;
- goto qspi_probe_err;
+ goto qspi_resource_err;
}
for (val = 0; val < num_irqs; val++) {
@@ -1369,8 +1369,9 @@ qspi_reg_err:
bcm_qspi_hw_uninit(qspi);
clk_disable_unprepare(qspi->clk);
qspi_probe_err:
- spi_master_put(master);
kfree(qspi->dev_ids);
+qspi_resource_err:
+ spi_master_put(master);
return ret;
}
/* probe function to be called by SoC specific platform driver probe */
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 680cdf549506..ba9743fa2326 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -263,8 +263,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
* no need to check it there.
* However, we need to ensure the following calculations.
*/
- if ((div < SPI_MBR_DIV_MIN) &&
- (div > SPI_MBR_DIV_MAX))
+ if (div < SPI_MBR_DIV_MIN ||
+ div > SPI_MBR_DIV_MAX)
return -EINVAL;
/* Determine the first power of 2 greater than or equal to div */
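The one-character stm32 fix above turns a range check that could never trigger (no divider is simultaneously below the minimum and above the maximum) into a real one. A tiny user-space illustration; the SPI_MBR_DIV_MIN/MAX values are hypothetical:

#include <stdio.h>

#define SPI_MBR_DIV_MIN 2	/* hypothetical limits, for illustration only */
#define SPI_MBR_DIV_MAX 256

int main(void)
{
	unsigned int div = 1000;	/* clearly out of range */

	int old_check = (div < SPI_MBR_DIV_MIN) && (div > SPI_MBR_DIV_MAX);
	int new_check = (div < SPI_MBR_DIV_MIN) || (div > SPI_MBR_DIV_MAX);

	printf("old check rejects: %d, new check rejects: %d\n",
	       old_check, new_check);	/* prints 0, 1 */
	return 0;
}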
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index d11c6de9c777..6150d2780e22 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -223,11 +223,9 @@ static int ad7192_setup(struct ad7192_state *st,
struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
unsigned long long scale_uv;
int i, ret, id;
- u8 ones[6];
/* reset the serial interface */
- memset(&ones, 0xFF, 6);
- ret = spi_write(st->sd.spi, &ones, 6);
+ ret = ad_sd_reset(&st->sd, 48);
if (ret < 0)
goto out;
usleep_range(500, 1000); /* Wait for at least 500us */
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 1691760339da..02573c517d9d 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -172,7 +172,7 @@ static int ade7759_spi_read_reg_40(struct device *dev,
reg_address);
goto error_ret;
}
- *val = ((u64)st->rx[1] << 32) | (st->rx[2] << 24) |
+ *val = ((u64)st->rx[1] << 32) | ((u64)st->rx[2] << 24) |
(st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5];
error_ret:
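The ade7759 cast matters because st->rx[] holds 8-bit values that are promoted to signed int before the shift; once the shifted result has its sign bit set, widening it into the 64-bit accumulator sign-extends the upper half. A small user-space demonstration (the byte value is made up; the un-cast shift is formally undefined in C but sign-extends in practice on common compilers):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t rx2 = 0x80;	/* hypothetical received byte */

	uint64_t wrong = rx2 << 24;		/* int promotion, sign-extends */
	uint64_t right = (uint64_t)rx2 << 24;	/* what the patch does */

	printf("without cast: 0x%016llx\n", (unsigned long long)wrong);
	printf("with cast   : 0x%016llx\n", (unsigned long long)right);
	return 0;
}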
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index d96f4512224f..b55e5ebba8b4 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -400,10 +400,10 @@ static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
struct media_link, list);
ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
if (ret)
- break;
+ return ret;
}
- return ret;
+ return 0;
}
/* async subdev complete notifier */
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 13eaf16ecd16..87595c594b12 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -496,8 +496,12 @@ static int spinand_program_page(struct spi_device *spi_nand,
if (!wbuf)
return -ENOMEM;
- enable_read_hw_ecc = 0;
- spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf);
+ enable_read_hw_ecc = 1;
+ retval = spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf);
+ if (retval < 0) {
+ dev_err(&spi_nand->dev, "ecc error on read page!!!\n");
+ return retval;
+ }
for (i = offset, j = 0; i < len; i++, j++)
wbuf[i] &= buf[j];
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index c4b1b218ea38..290b419aa9dd 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -570,12 +570,6 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value)
dev_dbg(&spi->dev, "set: DIO mapping");
#endif
- // check DIO number
- if (DIONumber > 5) {
- dev_dbg(&spi->dev, "set: illegal input param");
- return -EINVAL;
- }
-
switch (DIONumber) {
case 0: mask=MASK_DIO0; shift=SHIFT_DIO0; regaddr=REG_DIOMAPPING1; break;
case 1: mask=MASK_DIO1; shift=SHIFT_DIO1; regaddr=REG_DIOMAPPING1; break;
@@ -583,6 +577,9 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value)
case 3: mask=MASK_DIO3; shift=SHIFT_DIO3; regaddr=REG_DIOMAPPING1; break;
case 4: mask=MASK_DIO4; shift=SHIFT_DIO4; regaddr=REG_DIOMAPPING2; break;
case 5: mask=MASK_DIO5; shift=SHIFT_DIO5; regaddr=REG_DIOMAPPING2; break;
+ default:
+ dev_dbg(&spi->dev, "set: illegal input param");
+ return -EINVAL;
}
// read reg
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 6b778206a1a3..cb8a95aabd6c 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -119,9 +119,8 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
- rtw_free_mlme_priv_ie_data(pmlmepriv);
-
if (pmlmepriv) {
+ rtw_free_mlme_priv_ie_data(pmlmepriv);
if (pmlmepriv->free_bss_buf) {
vfree(pmlmepriv->free_bss_buf);
}
diff --git a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
index 92277457aba4..ce1dd6f9036f 100644
--- a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
+++ b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
@@ -311,6 +311,8 @@ static ssize_t proc_set_cam(struct file *file, const char __user *buffer, size_t
if (num < 2)
return count;
+ if (id >= TOTAL_CAM_ENTRY)
+ return -EINVAL;
if (strcmp("c", cmd) == 0) {
_clear_cam_entry(adapter, id);
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
index 5f84526cb5b5..edbf6af1c8b7 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
@@ -2901,11 +2901,11 @@ halmac_update_datapack_88xx(struct halmac_adapter *halmac_adapter,
if (halmac_adapter->fw_version.h2c_version < 4)
return HALMAC_RET_FW_NO_SUPPORT;
+ driver_adapter = halmac_adapter->driver_adapter;
+
HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
"[TRACE]%s ==========>\n", __func__);
- driver_adapter = halmac_adapter->driver_adapter;
-
HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
"[TRACE]%s <==========\n", __func__);
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
index f33024e4d853..544f638ed3ef 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
@@ -1618,10 +1618,11 @@ halmac_send_h2c_set_pwr_mode_88xx(struct halmac_adapter *halmac_adapter,
void *driver_adapter = NULL;
enum halmac_ret_status status = HALMAC_RET_SUCCESS;
+ driver_adapter = halmac_adapter->driver_adapter;
+
HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
"%s!!\n", __func__);
- driver_adapter = halmac_adapter->driver_adapter;
h2c_header = h2c_buff;
h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX;
@@ -1713,10 +1714,11 @@ halmac_media_status_rpt_88xx(struct halmac_adapter *halmac_adapter, u8 op_mode,
void *driver_adapter = NULL;
enum halmac_ret_status status = HALMAC_RET_SUCCESS;
+ driver_adapter = halmac_adapter->driver_adapter;
+
HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
"halmac_send_h2c_set_pwr_mode_88xx!!\n");
- driver_adapter = halmac_adapter->driver_adapter;
h2c_header = H2c_buff;
h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX;
@@ -2143,10 +2145,11 @@ halmac_func_ctrl_ch_switch_88xx(struct halmac_adapter *halmac_adapter,
enum halmac_cmd_process_status *process_status =
&halmac_adapter->halmac_state.scan_state_set.process_status;
+ driver_adapter = halmac_adapter->driver_adapter;
+
HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
"halmac_ctrl_ch_switch!!\n");
- driver_adapter = halmac_adapter->driver_adapter;
halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
if (halmac_transition_scan_state_88xx(
@@ -2276,15 +2279,13 @@ enum halmac_ret_status halmac_send_h2c_update_bcn_parse_info_88xx(
{
u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
u16 h2c_seq_mum = 0;
- void *driver_adapter = NULL;
+ void *driver_adapter = halmac_adapter->driver_adapter;
struct halmac_h2c_header_info h2c_header_info;
enum halmac_ret_status status = HALMAC_RET_SUCCESS;
HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
"%s!!\n", __func__);
- driver_adapter = halmac_adapter->driver_adapter;
-
UPDATE_BEACON_PARSING_INFO_SET_FUNC_EN(h2c_buff, bcn_ie_info->func_en);
UPDATE_BEACON_PARSING_INFO_SET_SIZE_TH(h2c_buff, bcn_ie_info->size_th);
UPDATE_BEACON_PARSING_INFO_SET_TIMEOUT(h2c_buff, bcn_ie_info->timeout);
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 67956e24779c..56f7be6af1f6 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -1376,6 +1376,8 @@ static void reset_highlight_buffers(struct vc_data *);
static int read_all_key;
+static int in_keyboard_notifier;
+
static void start_read_all_timer(struct vc_data *vc, int command);
enum {
@@ -1408,7 +1410,10 @@ static void read_all_doc(struct vc_data *vc)
cursor_track = read_all_mode;
spk_reset_index_count(0);
if (get_sentence_buf(vc, 0) == -1) {
- kbd_fakekey2(vc, RA_DOWN_ARROW);
+ del_timer(&cursor_timer);
+ if (!in_keyboard_notifier)
+ speakup_fake_down_arrow();
+ start_read_all_timer(vc, RA_DOWN_ARROW);
} else {
say_sentence_num(0, 0);
synth_insert_next_index(0);
@@ -2212,8 +2217,10 @@ static int keyboard_notifier_call(struct notifier_block *nb,
int ret = NOTIFY_OK;
static int keycode; /* to hold the current keycode */
+ in_keyboard_notifier = 1;
+
if (vc->vc_mode == KD_GRAPHICS)
- return ret;
+ goto out;
/*
* First, determine whether we are handling a fake keypress on
@@ -2225,7 +2232,7 @@ static int keyboard_notifier_call(struct notifier_block *nb,
*/
if (speakup_fake_key_pressed())
- return ret;
+ goto out;
switch (code) {
case KBD_KEYCODE:
@@ -2266,6 +2273,8 @@ static int keyboard_notifier_call(struct notifier_block *nb,
break;
}
}
+out:
+ in_keyboard_notifier = 0;
return ret;
}
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 74cce4f1a7bd..27ecf6fb49fd 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -1826,7 +1826,7 @@ static __init int visorutil_spar_detect(void)
return 0;
}
-static int init_unisys(void)
+static int __init init_unisys(void)
{
int result;
@@ -1841,7 +1841,7 @@ static int init_unisys(void)
return 0;
};
-static void exit_unisys(void)
+static void __exit exit_unisys(void)
{
acpi_bus_unregister_driver(&unisys_acpi_driver);
}
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index 5f3d8f2339e3..4be864dbd41c 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -390,8 +390,7 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
__func__, instance);
instance->alsa_stream = alsa_stream;
alsa_stream->instance = instance;
- ret = 0; // xxx todo -1;
- goto err_free_mem;
+ return 0;
}
/* Initialize and create a VCHI connection */
@@ -401,16 +400,15 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
LOG_ERR("%s: failed to initialise VCHI instance (ret=%d)\n",
__func__, ret);
- ret = -EIO;
- goto err_free_mem;
+ return -EIO;
}
ret = vchi_connect(NULL, 0, vchi_instance);
if (ret) {
LOG_ERR("%s: failed to connect VCHI instance (ret=%d)\n",
__func__, ret);
- ret = -EIO;
- goto err_free_mem;
+ kfree(vchi_instance);
+ return -EIO;
}
initted = 1;
}
@@ -421,19 +419,16 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
if (IS_ERR(instance)) {
LOG_ERR("%s: failed to initialize audio service\n", __func__);
- ret = PTR_ERR(instance);
- goto err_free_mem;
+ /* vchi_instance is retained for use next time. */
+ return PTR_ERR(instance);
}
instance->alsa_stream = alsa_stream;
alsa_stream->instance = instance;
LOG_DBG(" success !\n");
- ret = 0;
-err_free_mem:
- kfree(vchi_instance);
- return ret;
+ return 0;
}
int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream)
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 0159ca4407d8..be08849175ea 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -612,18 +612,20 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
if (head_bytes > actual)
head_bytes = actual;
- memcpy((char *)page_address(pages[0]) +
+ memcpy((char *)kmap(pages[0]) +
pagelist->offset,
fragments,
head_bytes);
+ kunmap(pages[0]);
}
if ((actual >= 0) && (head_bytes < actual) &&
(tail_bytes != 0)) {
- memcpy((char *)page_address(pages[num_pages - 1]) +
+ memcpy((char *)kmap(pages[num_pages - 1]) +
((pagelist->offset + actual) &
(PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
fragments + g_cache_line_size,
tail_bytes);
+ kunmap(pages[num_pages - 1]);
}
down(&g_free_fragments_mutex);
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 1c0c9553bc05..7dd38047ba23 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -246,11 +246,11 @@ struct mxser_port {
unsigned char err_shadow;
struct async_icount icount; /* kernel counters for 4 input interrupts */
- int timeout;
+ unsigned int timeout;
int read_status_mask;
int ignore_status_mask;
- int xmit_fifo_size;
+ unsigned int xmit_fifo_size;
int xmit_head;
int xmit_tail;
int xmit_cnt;
@@ -572,8 +572,9 @@ static void mxser_dtr_rts(struct tty_port *port, int on)
static int mxser_set_baud(struct tty_struct *tty, long newspd)
{
struct mxser_port *info = tty->driver_data;
- int quot = 0, baud;
+ unsigned int quot = 0, baud;
unsigned char cval;
+ u64 timeout;
if (!info->ioaddr)
return -1;
@@ -594,8 +595,13 @@ static int mxser_set_baud(struct tty_struct *tty, long newspd)
quot = 0;
}
- info->timeout = ((info->xmit_fifo_size * HZ * 10 * quot) / info->baud_base);
- info->timeout += HZ / 50; /* Add .02 seconds of slop */
+ /*
+ * worst case (128 * 1000 * 10 * 18432) needs 35 bits, so divide in the
+ * u64 domain
+ */
+ timeout = (u64)info->xmit_fifo_size * HZ * 10 * quot;
+ do_div(timeout, info->baud_base);
+ info->timeout = timeout + HZ / 50; /* Add .02 seconds of slop */
if (quot) {
info->MCR |= UART_MCR_DTR;
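The mxser comment above quotes the worst case 128 * 1000 * 10 * 18432, which needs 35 bits and therefore overflows a 32-bit intermediate. A user-space sketch of the difference; the baud_base value is made up, and plain 64-bit division stands in for the kernel's do_div():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t fifo = 128, hz = 1000, quot = 18432;
	uint32_t baud_base = 921600;		/* hypothetical base rate */

	uint32_t wrapped = fifo * hz * 10 * quot;	 /* wraps in 32 bits */
	uint64_t full = (uint64_t)fifo * hz * 10 * quot; /* 23,592,960,000 */

	printf("32-bit product: %u\n", wrapped);
	printf("64-bit product: %llu\n", (unsigned long long)full);
	printf("timeout ticks : %llu\n",
	       (unsigned long long)(full / baud_base));	/* 25600 */
	return 0;
}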
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 583c9a0c7ecc..8c48c3784831 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -507,9 +507,14 @@ static void bcm_uart_set_termios(struct uart_port *port,
{
unsigned int ctl, baud, quot, ier;
unsigned long flags;
+ int tries;
spin_lock_irqsave(&port->lock, flags);
+ /* Drain the hot tub fully before we power it off for the winter. */
+ for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--)
+ mdelay(10);
+
/* disable uart while changing speed */
bcm_uart_disable(port);
bcm_uart_flush(port);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 849c1f9991ce..f0252184291e 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1276,7 +1276,6 @@ static void rx_dma_timer_init(struct lpuart_port *sport)
static int lpuart_startup(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- int ret;
unsigned long flags;
unsigned char temp;
@@ -1291,11 +1290,6 @@ static int lpuart_startup(struct uart_port *port)
sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
UARTPFIFO_FIFOSIZE_MASK) + 1);
- ret = devm_request_irq(port->dev, port->irq, lpuart_int, 0,
- DRIVER_NAME, sport);
- if (ret)
- return ret;
-
spin_lock_irqsave(&sport->port.lock, flags);
lpuart_setup_watermark(sport);
@@ -1333,7 +1327,6 @@ static int lpuart_startup(struct uart_port *port)
static int lpuart32_startup(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- int ret;
unsigned long flags;
unsigned long temp;
@@ -1346,11 +1339,6 @@ static int lpuart32_startup(struct uart_port *port)
sport->rxfifo_size = 0x1 << (((temp >> UARTFIFO_RXSIZE_OFF) &
UARTFIFO_FIFOSIZE_MASK) - 1);
- ret = devm_request_irq(port->dev, port->irq, lpuart32_int, 0,
- DRIVER_NAME, sport);
- if (ret)
- return ret;
-
spin_lock_irqsave(&sport->port.lock, flags);
lpuart32_setup_watermark(sport);
@@ -1380,8 +1368,6 @@ static void lpuart_shutdown(struct uart_port *port)
spin_unlock_irqrestore(&port->lock, flags);
- devm_free_irq(port->dev, port->irq, sport);
-
if (sport->lpuart_dma_rx_use) {
del_timer_sync(&sport->lpuart_timer);
lpuart_dma_rx_free(&sport->port);
@@ -1400,7 +1386,6 @@ static void lpuart_shutdown(struct uart_port *port)
static void lpuart32_shutdown(struct uart_port *port)
{
- struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned long temp;
unsigned long flags;
@@ -1413,8 +1398,6 @@ static void lpuart32_shutdown(struct uart_port *port)
lpuart32_write(port, temp, UARTCTRL);
spin_unlock_irqrestore(&port->lock, flags);
-
- devm_free_irq(port->dev, port->irq, sport);
}
static void
@@ -2212,16 +2195,22 @@ static int lpuart_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &sport->port);
- if (lpuart_is_32(sport))
+ if (lpuart_is_32(sport)) {
lpuart_reg.cons = LPUART32_CONSOLE;
- else
+ ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0,
+ DRIVER_NAME, sport);
+ } else {
lpuart_reg.cons = LPUART_CONSOLE;
+ ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0,
+ DRIVER_NAME, sport);
+ }
+
+ if (ret)
+ goto failed_irq_request;
ret = uart_add_one_port(&lpuart_reg, &sport->port);
- if (ret) {
- clk_disable_unprepare(sport->clk);
- return ret;
- }
+ if (ret)
+ goto failed_attach_port;
sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
if (!sport->dma_tx_chan)
@@ -2240,6 +2229,11 @@ static int lpuart_probe(struct platform_device *pdev)
}
return 0;
+
+failed_attach_port:
+failed_irq_request:
+ clk_disable_unprepare(sport->clk);
+ return ret;
}
static int lpuart_remove(struct platform_device *pdev)
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index cdd2f942317c..b9c7a904c1ea 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -889,7 +889,16 @@ static int sccnxp_probe(struct platform_device *pdev)
goto err_out;
uartclk = 0;
} else {
- clk_prepare_enable(clk);
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto err_out;
+
+ ret = devm_add_action_or_reset(&pdev->dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
+ if (ret)
+ goto err_out;
+
uartclk = clk_get_rate(clk);
}
@@ -988,7 +997,7 @@ static int sccnxp_probe(struct platform_device *pdev)
uart_unregister_driver(&s->uart);
err_out:
if (!IS_ERR(s->regulator))
- return regulator_disable(s->regulator);
+ regulator_disable(s->regulator);
return ret;
}
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 2fe216b276e2..84a8ac2a779f 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -694,10 +694,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
tty_set_termios_ldisc(tty, disc);
retval = tty_ldisc_open(tty, tty->ldisc);
if (retval) {
- if (!WARN_ON(disc == N_TTY)) {
- tty_ldisc_put(tty->ldisc);
- tty->ldisc = NULL;
- }
+ tty_ldisc_put(tty->ldisc);
+ tty->ldisc = NULL;
}
return retval;
}
@@ -752,8 +750,9 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)
if (tty->ldisc) {
if (reinit) {
- if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0)
- tty_ldisc_reinit(tty, N_TTY);
+ if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0 &&
+ tty_ldisc_reinit(tty, N_TTY) < 0)
+ WARN_ON(tty_ldisc_reinit(tty, N_NULL) < 0);
} else
tty_ldisc_kill(tty);
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5e056064259c..18c923a4c16e 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1832,6 +1832,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
+ { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
+ .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
+ },
{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 5aacea1978a5..3e865dbf878c 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -190,8 +190,10 @@ static void wdm_in_callback(struct urb *urb)
/*
* only set a new error if there is no previous error.
* Errors are only cleared during read/open
+ * Avoid propagating -EPIPE (stall) to userspace since it is
+ * better handled as an empty read
*/
- if (desc->rerr == 0)
+ if (desc->rerr == 0 && status != -EPIPE)
desc->rerr = status;
if (length + desc->length > desc->wMaxCommand) {
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 4be52c602e9b..883549ee946c 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -643,15 +643,23 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
} else if (header->bDescriptorType ==
USB_DT_INTERFACE_ASSOCIATION) {
+ struct usb_interface_assoc_descriptor *d;
+
+ d = (struct usb_interface_assoc_descriptor *)header;
+ if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
+ dev_warn(ddev,
+ "config %d has an invalid interface association descriptor of length %d, skipping\n",
+ cfgno, d->bLength);
+ continue;
+ }
+
if (iad_num == USB_MAXIADS) {
dev_warn(ddev, "found more Interface "
"Association Descriptors "
"than allocated for in "
"configuration %d\n", cfgno);
} else {
- config->intf_assoc[iad_num] =
- (struct usb_interface_assoc_descriptor
- *)header;
+ config->intf_assoc[iad_num] = d;
iad_num++;
}
@@ -852,7 +860,7 @@ int usb_get_configuration(struct usb_device *dev)
}
if (dev->quirks & USB_QUIRK_DELAY_INIT)
- msleep(100);
+ msleep(200);
result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
bigbuffer, length);
@@ -952,10 +960,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
for (i = 0; i < num; i++) {
buffer += length;
cap = (struct usb_dev_cap_header *)buffer;
- length = cap->bLength;
- if (total_len < length)
+ if (total_len < sizeof(*cap) || total_len < cap->bLength) {
+ dev->bos->desc->bNumDeviceCaps = i;
break;
+ }
+ length = cap->bLength;
total_len -= length;
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 318bb3b96687..e9326f31db8d 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -140,6 +140,9 @@ module_param(usbfs_memory_mb, uint, 0644);
MODULE_PARM_DESC(usbfs_memory_mb,
"maximum MB allowed for usbfs buffers (0 = no limit)");
+/* Hard limit, necessary to avoid arithmetic overflow */
+#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
+
static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */
/* Check whether it's okay to allocate more memory for a transfer */
@@ -1460,6 +1463,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
USBDEVFS_URB_ZERO_PACKET |
USBDEVFS_URB_NO_INTERRUPT))
return -EINVAL;
+ if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
+ return -EINVAL;
if (uurb->buffer_length > 0 && !uurb->buffer)
return -EINVAL;
if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL &&
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 41eaf0b52518..e9ce6bb0b22d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2710,13 +2710,16 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (!(portstatus & USB_PORT_STAT_CONNECTION))
return -ENOTCONN;
- /* bomb out completely if the connection bounced. A USB 3.0
- * connection may bounce if multiple warm resets were issued,
+ /* Retry if connect change is set but status is still connected.
+ * A USB 3.0 connection may bounce if multiple warm resets were issued,
* but the device may have successfully re-connected. Ignore it.
*/
if (!hub_is_superspeed(hub->hdev) &&
- (portchange & USB_PORT_STAT_C_CONNECTION))
- return -ENOTCONN;
+ (portchange & USB_PORT_STAT_C_CONNECTION)) {
+ usb_clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_CONNECTION);
+ return -EAGAIN;
+ }
if (!(portstatus & USB_PORT_STAT_ENABLE))
return -EBUSY;
@@ -4838,7 +4841,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
goto loop;
if (udev->quirks & USB_QUIRK_DELAY_INIT)
- msleep(1000);
+ msleep(2000);
/* consecutive bus-powered hubs aren't reliable; they can
* violate the voltage drop budget. if the new child has
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 4c38ea41ae96..371a07d874a3 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -2069,6 +2069,10 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
elength = 1;
goto next_desc;
}
+ if ((buflen < elength) || (elength < 3)) {
+ dev_err(&intf->dev, "invalid descriptor buffer length\n");
+ break;
+ }
if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
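The cdc_parse_cdc_header() hunk above adds the usual defensive pattern for walking length-prefixed descriptors: validate bLength against both a minimum and the bytes actually remaining before advancing. A self-contained sketch of the same pattern over a made-up buffer in which the second descriptor lies about its length:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t buf[] = { 3, 0x24, 0x00,   40, 0x24, 0x06 };
	size_t buflen = sizeof(buf), off = 0;

	while (buflen >= 3) {
		uint8_t elength = buf[off];

		if (elength < 3 || elength > buflen) {
			printf("invalid descriptor length %u, stopping\n", elength);
			break;
		}
		printf("descriptor type 0x%02x, subtype 0x%02x, length %u\n",
		       buf[off + 1], buf[off + 2], elength);
		off += elength;
		buflen -= elength;
	}
	return 0;
}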
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 82806e311202..a6aaf2f193a4 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -221,6 +221,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Corsair Strafe RGB */
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* MIDI keyboard WORLDE MINI */
+ { USB_DEVICE(0x1c75, 0x0204), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Acer C120 LED Projector */
{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 4cef7d4f9cd0..a26d1fde0f5e 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -177,6 +177,7 @@ static const struct of_device_id of_dwc3_simple_match[] = {
{ .compatible = "rockchip,rk3399-dwc3" },
{ .compatible = "xlnx,zynqmp-dwc3" },
{ .compatible = "cavium,octeon-7130-usb-uctl" },
+ { .compatible = "sprd,sc9860-dwc3" },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 827e376bfa97..75e6cb044eb2 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -990,6 +990,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
DWC3_TRBCTL_CONTROL_DATA,
true);
+ req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
+
/* Now prepare one extra TRB to align transfer size */
dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
maxpacket - rem,
@@ -1015,6 +1017,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
DWC3_TRBCTL_CONTROL_DATA,
true);
+ req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
+
/* Now prepare one extra TRB to align transfer size */
dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
0, DWC3_TRBCTL_CONTROL_DATA,
@@ -1029,6 +1033,9 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
dwc3_ep0_prepare_one_trb(dep, req->request.dma,
req->request.length, DWC3_TRBCTL_CONTROL_DATA,
false);
+
+ req->trb = &dwc->ep0_trb[dep->trb_enqueue];
+
ret = dwc3_ep0_start_trans(dep);
}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index dd74c99d6ce1..5d061b3d8224 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2026,6 +2026,8 @@ static DEVICE_ATTR_RO(suspended);
static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
+ struct usb_gadget_strings *gstr = cdev->driver->strings[0];
+ struct usb_string *dev_str = gstr->strings;
/* composite_disconnect() must already have been called
* by the underlying peripheral controller driver!
@@ -2045,6 +2047,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
composite_dev_cleanup(cdev);
+ if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer)
+ dev_str[USB_GADGET_MANUFACTURER_IDX].s = "";
+
kfree(cdev->def_manufacturer);
kfree(cdev);
set_gadget_data(gadget, NULL);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index a22a892de7b7..aeb9f3c40521 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1143,11 +1143,12 @@ static struct configfs_attribute *interf_grp_attrs[] = {
NULL
};
-int usb_os_desc_prepare_interf_dir(struct config_group *parent,
- int n_interf,
- struct usb_os_desc **desc,
- char **names,
- struct module *owner)
+struct config_group *usb_os_desc_prepare_interf_dir(
+ struct config_group *parent,
+ int n_interf,
+ struct usb_os_desc **desc,
+ char **names,
+ struct module *owner)
{
struct config_group *os_desc_group;
struct config_item_type *os_desc_type, *interface_type;
@@ -1159,7 +1160,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL);
if (!vlabuf)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group);
os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type);
@@ -1184,7 +1185,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
configfs_add_default_group(&d->group, os_desc_group);
}
- return 0;
+ return os_desc_group;
}
EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir);
diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h
index 36c468c4f5e9..540d5e92ed22 100644
--- a/drivers/usb/gadget/configfs.h
+++ b/drivers/usb/gadget/configfs.h
@@ -5,11 +5,12 @@
void unregister_gadget_item(struct config_item *item);
-int usb_os_desc_prepare_interf_dir(struct config_group *parent,
- int n_interf,
- struct usb_os_desc **desc,
- char **names,
- struct module *owner);
+struct config_group *usb_os_desc_prepare_interf_dir(
+ struct config_group *parent,
+ int n_interf,
+ struct usb_os_desc **desc,
+ char **names,
+ struct module *owner);
static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item)
{
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 9990944a7245..8b342587f8ad 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -46,7 +46,8 @@
static void ffs_data_get(struct ffs_data *ffs);
static void ffs_data_put(struct ffs_data *ffs);
/* Creates new ffs_data object. */
-static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
+static struct ffs_data *__must_check ffs_data_new(const char *dev_name)
+ __attribute__((malloc));
/* Opened counter handling. */
static void ffs_data_opened(struct ffs_data *ffs);
@@ -780,11 +781,12 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
struct usb_request *req)
{
struct ffs_io_data *io_data = req->context;
+ struct ffs_data *ffs = io_data->ffs;
ENTER();
INIT_WORK(&io_data->work, ffs_user_copy_worker);
- schedule_work(&io_data->work);
+ queue_work(ffs->io_completion_wq, &io_data->work);
}
static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
@@ -1500,7 +1502,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
if (unlikely(ret < 0))
return ERR_PTR(ret);
- ffs = ffs_data_new();
+ ffs = ffs_data_new(dev_name);
if (unlikely(!ffs))
return ERR_PTR(-ENOMEM);
ffs->file_perms = data.perms;
@@ -1610,6 +1612,7 @@ static void ffs_data_put(struct ffs_data *ffs)
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
waitqueue_active(&ffs->ep0req_completion.wait) ||
waitqueue_active(&ffs->wait));
+ destroy_workqueue(ffs->io_completion_wq);
kfree(ffs->dev_name);
kfree(ffs);
}
@@ -1642,7 +1645,7 @@ static void ffs_data_closed(struct ffs_data *ffs)
ffs_data_put(ffs);
}
-static struct ffs_data *ffs_data_new(void)
+static struct ffs_data *ffs_data_new(const char *dev_name)
{
struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
if (unlikely(!ffs))
@@ -1650,6 +1653,12 @@ static struct ffs_data *ffs_data_new(void)
ENTER();
+ ffs->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name);
+ if (!ffs->io_completion_wq) {
+ kfree(ffs);
+ return NULL;
+ }
+
refcount_set(&ffs->ref, 1);
atomic_set(&ffs->opened, 0);
ffs->state = FFS_READ_DESCRIPTORS;
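
The f_fs hunks above move I/O completion handling from the system workqueue to a per-instance ordered workqueue that is created in ffs_data_new() and destroyed together with the instance. A minimal sketch of that lifecycle, using invented demo_* names rather than the real f_fs structures:

/* Sketch only: per-instance ordered workqueue, mirroring the pattern above. */
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_data {
    struct workqueue_struct *io_completion_wq;
    struct work_struct work;
};

static struct demo_data *demo_new(const char *dev_name)
{
    struct demo_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

    if (!d)
        return NULL;
    /* one ordered queue per instance, named after the device */
    d->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name);
    if (!d->io_completion_wq) {
        kfree(d);
        return NULL;
    }
    return d;
}

static void demo_complete(struct demo_data *d)
{
    /* queue on the instance's own queue instead of the system one */
    queue_work(d->io_completion_wq, &d->work);
}

static void demo_put(struct demo_data *d)
{
    destroy_workqueue(d->io_completion_wq);   /* flushes pending work */
    kfree(d);
}
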
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index d6bd0244b008..5153e29870c3 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -307,8 +307,6 @@ struct fsg_common {
struct completion thread_notifier;
struct task_struct *thread_task;
- /* Callback functions. */
- const struct fsg_operations *ops;
/* Gadget's private data. */
void *private_data;
@@ -2438,6 +2436,7 @@ static void handle_exception(struct fsg_common *common)
static int fsg_main_thread(void *common_)
{
struct fsg_common *common = common_;
+ int i;
/*
* Allow the thread to be killed by a signal, but set the signal mask
@@ -2476,21 +2475,16 @@ static int fsg_main_thread(void *common_)
common->thread_task = NULL;
spin_unlock_irq(&common->lock);
- if (!common->ops || !common->ops->thread_exits
- || common->ops->thread_exits(common) < 0) {
- int i;
+ /* Eject media from all LUNs */
- down_write(&common->filesem);
- for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
- struct fsg_lun *curlun = common->luns[i];
- if (!curlun || !fsg_lun_is_open(curlun))
- continue;
+ down_write(&common->filesem);
+ for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
+ struct fsg_lun *curlun = common->luns[i];
+ if (curlun && fsg_lun_is_open(curlun))
fsg_lun_close(curlun);
- curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
- }
- up_write(&common->filesem);
}
+ up_write(&common->filesem);
/* Let fsg_unbind() know the thread has exited */
complete_and_exit(&common->thread_notifier, 0);
@@ -2681,13 +2675,6 @@ void fsg_common_remove_luns(struct fsg_common *common)
}
EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
-void fsg_common_set_ops(struct fsg_common *common,
- const struct fsg_operations *ops)
-{
- common->ops = ops;
-}
-EXPORT_SYMBOL_GPL(fsg_common_set_ops);
-
void fsg_common_free_buffers(struct fsg_common *common)
{
_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
index d3902313b8ac..dc05ca0c4359 100644
--- a/drivers/usb/gadget/function/f_mass_storage.h
+++ b/drivers/usb/gadget/function/f_mass_storage.h
@@ -60,17 +60,6 @@ struct fsg_module_parameters {
struct fsg_common;
/* FSF callback functions */
-struct fsg_operations {
- /*
- * Callback function to call when thread exits. If no
- * callback is set or it returns value lower then zero MSF
- * will force eject all LUNs it operates on (including those
- * marked as non-removable or with prevent_medium_removal flag
- * set).
- */
- int (*thread_exits)(struct fsg_common *common);
-};
-
struct fsg_lun_opts {
struct config_group group;
struct fsg_lun *lun;
@@ -142,9 +131,6 @@ void fsg_common_remove_lun(struct fsg_lun *lun);
void fsg_common_remove_luns(struct fsg_common *common);
-void fsg_common_set_ops(struct fsg_common *common,
- const struct fsg_operations *ops);
-
int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
unsigned int id, const char *name,
const char **name_pfx);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 8df244fc9d80..ea0da35a44e2 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -555,6 +555,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
size_t size; /* Amount of data in a TX request. */
size_t bytes_copied = 0;
struct usb_request *req;
+ int value;
DBG(dev, "printer_write trying to send %d bytes\n", (int)len);
@@ -634,7 +635,11 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
return -EAGAIN;
}
- if (usb_ep_queue(dev->in_ep, req, GFP_ATOMIC)) {
+ /* here, we unlock, and only unlock, to avoid deadlock. */
+ spin_unlock(&dev->lock);
+ value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
+ spin_lock(&dev->lock);
+ if (value) {
list_add(&req->list, &dev->tx_reqs);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index e1d5853ef1e4..c7c5b3ce1d98 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -908,6 +908,7 @@ static void rndis_free_inst(struct usb_function_instance *f)
free_netdev(opts->net);
}
+ kfree(opts->rndis_interf_group); /* single VLA chunk */
kfree(opts);
}
@@ -916,6 +917,7 @@ static struct usb_function_instance *rndis_alloc_inst(void)
struct f_rndis_opts *opts;
struct usb_os_desc *descs[1];
char *names[1];
+ struct config_group *rndis_interf_group;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
@@ -940,8 +942,14 @@ static struct usb_function_instance *rndis_alloc_inst(void)
names[0] = "rndis";
config_group_init_type_name(&opts->func_inst.group, "",
&rndis_func_type);
- usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
- names, THIS_MODULE);
+ rndis_interf_group =
+ usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
+ names, THIS_MODULE);
+ if (IS_ERR(rndis_interf_group)) {
+ rndis_free_inst(&opts->func_inst);
+ return ERR_CAST(rndis_interf_group);
+ }
+ opts->rndis_interf_group = rndis_interf_group;
return &opts->func_inst;
}
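
With usb_os_desc_prepare_interf_dir() now returning the interface config_group (or an encoded errno), callers such as rndis_alloc_inst() switch to the IS_ERR()/ERR_CAST() convention. A generic sketch of that convention, with demo_* names standing in for the real functions:

/* Sketch only: return a valid pointer or an encoded errno, never NULL on error. */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/configfs.h>

static struct config_group *demo_prepare(void)
{
    struct config_group *group = kzalloc(sizeof(*group), GFP_KERNEL);

    if (!group)
        return ERR_PTR(-ENOMEM);   /* encode the errno in the pointer */
    return group;
}

static int demo_caller(void)
{
    struct config_group *group = demo_prepare();

    if (IS_ERR(group))
        return PTR_ERR(group);     /* or ERR_CAST() when returning a pointer */
    kfree(group);
    return 0;
}
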
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index 540f1c48c1a8..79f70ebf85dc 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -279,6 +279,7 @@ struct ffs_data {
} file_perms;
struct eventfd_ctx *ffs_eventfd;
+ struct workqueue_struct *io_completion_wq;
bool no_disconnect;
struct work_struct reset_work;
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
index a35ee3c2545d..efdb7ac381d9 100644
--- a/drivers/usb/gadget/function/u_rndis.h
+++ b/drivers/usb/gadget/function/u_rndis.h
@@ -26,6 +26,7 @@ struct f_rndis_opts {
bool bound;
bool borrowed_net;
+ struct config_group *rndis_interf_group;
struct usb_os_desc rndis_os_desc;
char rndis_ext_compat_id[16];
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 684900fcfe24..5c28bee327e1 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -28,7 +28,7 @@
#include <linux/aio.h>
#include <linux/uio.h>
#include <linux/refcount.h>
-
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
@@ -116,6 +116,7 @@ enum ep0_state {
struct dev_data {
spinlock_t lock;
refcount_t count;
+ int udc_usage;
enum ep0_state state; /* P: lock */
struct usb_gadgetfs_event event [N_EVENT];
unsigned ev_next;
@@ -513,9 +514,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
INIT_WORK(&priv->work, ep_user_copy_worker);
schedule_work(&priv->work);
}
- spin_unlock(&epdata->dev->lock);
usb_ep_free_request(ep, req);
+ spin_unlock(&epdata->dev->lock);
put_ep(epdata);
}
@@ -939,9 +940,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
struct usb_request *req = dev->req;
if ((retval = setup_req (ep, req, 0)) == 0) {
+ ++dev->udc_usage;
spin_unlock_irq (&dev->lock);
retval = usb_ep_queue (ep, req, GFP_KERNEL);
spin_lock_irq (&dev->lock);
+ --dev->udc_usage;
}
dev->state = STATE_DEV_CONNECTED;
@@ -983,11 +986,14 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
retval = -EIO;
else {
len = min (len, (size_t)dev->req->actual);
-// FIXME don't call this with the spinlock held ...
+ ++dev->udc_usage;
+ spin_unlock_irq(&dev->lock);
if (copy_to_user (buf, dev->req->buf, len))
retval = -EFAULT;
else
retval = len;
+ spin_lock_irq(&dev->lock);
+ --dev->udc_usage;
clean_req (dev->gadget->ep0, dev->req);
/* NOTE userspace can't yet choose to stall */
}
@@ -1131,6 +1137,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
retval = setup_req (dev->gadget->ep0, dev->req, len);
if (retval == 0) {
dev->state = STATE_DEV_CONNECTED;
+ ++dev->udc_usage;
spin_unlock_irq (&dev->lock);
if (copy_from_user (dev->req->buf, buf, len))
retval = -EFAULT;
@@ -1142,6 +1149,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
GFP_KERNEL);
}
spin_lock_irq(&dev->lock);
+ --dev->udc_usage;
if (retval < 0) {
clean_req (dev->gadget->ep0, dev->req);
} else
@@ -1243,9 +1251,21 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
struct usb_gadget *gadget = dev->gadget;
long ret = -ENOTTY;
- if (gadget->ops->ioctl)
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_DEV_OPENED ||
+ dev->state == STATE_DEV_UNBOUND) {
+ /* Not bound to a UDC */
+ } else if (gadget->ops->ioctl) {
+ ++dev->udc_usage;
+ spin_unlock_irq(&dev->lock);
+
ret = gadget->ops->ioctl (gadget, code, value);
+ spin_lock_irq(&dev->lock);
+ --dev->udc_usage;
+ }
+ spin_unlock_irq(&dev->lock);
+
return ret;
}
@@ -1463,10 +1483,12 @@ delegate:
if (value < 0)
break;
+ ++dev->udc_usage;
spin_unlock (&dev->lock);
value = usb_ep_queue (gadget->ep0, dev->req,
GFP_KERNEL);
spin_lock (&dev->lock);
+ --dev->udc_usage;
if (value < 0) {
clean_req (gadget->ep0, dev->req);
break;
@@ -1490,8 +1512,12 @@ delegate:
req->length = value;
req->zero = value < w_length;
+ ++dev->udc_usage;
spin_unlock (&dev->lock);
value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
+ spin_lock(&dev->lock);
+ --dev->udc_usage;
+ spin_unlock(&dev->lock);
if (value < 0) {
DBG (dev, "ep_queue --> %d\n", value);
req->status = 0;
@@ -1518,21 +1544,24 @@ static void destroy_ep_files (struct dev_data *dev)
/* break link to FS */
ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
list_del_init (&ep->epfiles);
+ spin_unlock_irq (&dev->lock);
+
dentry = ep->dentry;
ep->dentry = NULL;
parent = d_inode(dentry->d_parent);
/* break link to controller */
+ mutex_lock(&ep->lock);
if (ep->state == STATE_EP_ENABLED)
(void) usb_ep_disable (ep->ep);
ep->state = STATE_EP_UNBOUND;
usb_ep_free_request (ep->ep, ep->req);
ep->ep = NULL;
+ mutex_unlock(&ep->lock);
+
wake_up (&ep->wait);
put_ep (ep);
- spin_unlock_irq (&dev->lock);
-
/* break link to dcache */
inode_lock(parent);
d_delete (dentry);
@@ -1603,6 +1632,11 @@ gadgetfs_unbind (struct usb_gadget *gadget)
spin_lock_irq (&dev->lock);
dev->state = STATE_DEV_UNBOUND;
+ while (dev->udc_usage > 0) {
+ spin_unlock_irq(&dev->lock);
+ usleep_range(1000, 2000);
+ spin_lock_irq(&dev->lock);
+ }
spin_unlock_irq (&dev->lock);
destroy_ep_files (dev);
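
The gadgetfs changes bracket every call into the UDC with a udc_usage count so the spinlock can be dropped around the call, and gadgetfs_unbind() polls until that count drains before tearing down. A stripped-down sketch of the same idea; the demo_* names are placeholders, not the gadgetfs symbols:

/* Sketch only: lock assumed initialised with spin_lock_init(). */
#include <linux/spinlock.h>
#include <linux/delay.h>

struct demo_dev {
    spinlock_t lock;
    int udc_usage;    /* calls into the UDC currently in flight */
};

static void demo_call_udc(struct demo_dev *dev, void (*udc_op)(void))
{
    spin_lock_irq(&dev->lock);
    ++dev->udc_usage;
    spin_unlock_irq(&dev->lock);   /* never call into the UDC with the lock held */

    udc_op();

    spin_lock_irq(&dev->lock);
    --dev->udc_usage;
    spin_unlock_irq(&dev->lock);
}

static void demo_unbind(struct demo_dev *dev)
{
    spin_lock_irq(&dev->lock);
    while (dev->udc_usage > 0) {   /* wait for in-flight calls to return */
        spin_unlock_irq(&dev->lock);
        usleep_range(1000, 2000);
        spin_lock_irq(&dev->lock);
    }
    spin_unlock_irq(&dev->lock);
}
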
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index e99ab57ee3e5..fcba59782f26 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -107,15 +107,6 @@ static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
-static unsigned long msg_registered;
-static void msg_cleanup(void);
-
-static int msg_thread_exits(struct fsg_common *common)
-{
- msg_cleanup();
- return 0;
-}
-
static int msg_do_config(struct usb_configuration *c)
{
struct fsg_opts *opts;
@@ -154,9 +145,6 @@ static struct usb_configuration msg_config_driver = {
static int msg_bind(struct usb_composite_dev *cdev)
{
- static const struct fsg_operations ops = {
- .thread_exits = msg_thread_exits,
- };
struct fsg_opts *opts;
struct fsg_config config;
int status;
@@ -173,8 +161,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
if (status)
goto fail;
- fsg_common_set_ops(opts->common, &ops);
-
status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
if (status)
goto fail_set_cdev;
@@ -256,18 +242,12 @@ MODULE_LICENSE("GPL");
static int __init msg_init(void)
{
- int ret;
-
- ret = usb_composite_probe(&msg_driver);
- set_bit(0, &msg_registered);
-
- return ret;
+ return usb_composite_probe(&msg_driver);
}
module_init(msg_init);
-static void msg_cleanup(void)
+static void __exit msg_cleanup(void)
{
- if (test_and_clear_bit(0, &msg_registered))
- usb_composite_unregister(&msg_driver);
+ usb_composite_unregister(&msg_driver);
}
module_exit(msg_cleanup);
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 7cd5c969fcbe..1e9567091d86 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -273,6 +273,7 @@ config USB_SNP_CORE
config USB_SNP_UDC_PLAT
tristate "Synopsys USB 2.0 Device controller"
depends on USB_GADGET && OF && HAS_DMA
+ depends on EXTCON || EXTCON=n
select USB_GADGET_DUALSPEED
select USB_SNP_CORE
default ARCH_BCM_IPROC
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 98d71400f8a1..a884c022df7a 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -29,6 +29,8 @@
#include <linux/of_gpio.h>
#include "atmel_usba_udc.h"
+#define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \
+ | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)
#ifdef CONFIG_USB_GADGET_DEBUG_FS
#include <linux/debugfs.h>
@@ -2361,7 +2363,7 @@ static int usba_udc_probe(struct platform_device *pdev)
IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&pdev->dev,
gpio_to_irq(udc->vbus_pin), NULL,
- usba_vbus_irq_thread, IRQF_ONESHOT,
+ usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
"atmel_usba_udc", udc);
if (ret) {
udc->vbus_pin = -ENODEV;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 75c51ca4ee0f..d41d07aae0ce 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1320,8 +1320,7 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
udc->dev.driver = &driver->driver;
udc->gadget->dev.driver = &driver->driver;
- if (driver->max_speed < udc->gadget->max_speed)
- usb_gadget_udc_set_speed(udc, driver->max_speed);
+ usb_gadget_udc_set_speed(udc, driver->max_speed);
ret = driver->bind(udc->gadget, driver);
if (ret)
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index a030d7923d7d..f04e91ef9e7c 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -237,6 +237,8 @@ struct dummy_hcd {
struct usb_device *udev;
struct list_head urbp_list;
+ struct urbp *next_frame_urbp;
+
u32 stream_en_ep;
u8 num_stream[30 / 2];
@@ -253,11 +255,13 @@ struct dummy {
*/
struct dummy_ep ep[DUMMY_ENDPOINTS];
int address;
+ int callback_usage;
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
struct dummy_request fifo_req;
u8 fifo_buf[FIFO_SIZE];
u16 devstatus;
+ unsigned ints_enabled:1;
unsigned udc_suspended:1;
unsigned pullup:1;
@@ -375,11 +379,10 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
USB_PORT_STAT_CONNECTION) == 0)
dum_hcd->port_status |=
(USB_PORT_STAT_C_CONNECTION << 16);
- if ((dum_hcd->port_status &
- USB_PORT_STAT_ENABLE) == 1 &&
- (dum_hcd->port_status &
- USB_SS_PORT_LS_U0) == 1 &&
- dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
+ if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) &&
+ (dum_hcd->port_status &
+ USB_PORT_STAT_LINK_STATE) == USB_SS_PORT_LS_U0 &&
+ dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
dum_hcd->active = 1;
}
} else {
@@ -416,6 +419,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
static void set_link_state(struct dummy_hcd *dum_hcd)
{
struct dummy *dum = dum_hcd->dum;
+ unsigned int power_bit;
dum_hcd->active = 0;
if (dum->pullup)
@@ -426,32 +430,43 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
return;
set_link_state_by_speed(dum_hcd);
+ power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ?
+ USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER);
if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
dum_hcd->active)
dum_hcd->resuming = 0;
/* Currently !connected or in reset */
- if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
+ if ((dum_hcd->port_status & power_bit) == 0 ||
(dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
- unsigned disconnect = USB_PORT_STAT_CONNECTION &
+ unsigned int disconnect = power_bit &
dum_hcd->old_status & (~dum_hcd->port_status);
- unsigned reset = USB_PORT_STAT_RESET &
+ unsigned int reset = USB_PORT_STAT_RESET &
(~dum_hcd->old_status) & dum_hcd->port_status;
/* Report reset and disconnect events to the driver */
- if (dum->driver && (disconnect || reset)) {
+ if (dum->ints_enabled && (disconnect || reset)) {
stop_activity(dum);
+ ++dum->callback_usage;
+ spin_unlock(&dum->lock);
if (reset)
usb_gadget_udc_reset(&dum->gadget, dum->driver);
else
dum->driver->disconnect(&dum->gadget);
+ spin_lock(&dum->lock);
+ --dum->callback_usage;
}
- } else if (dum_hcd->active != dum_hcd->old_active) {
+ } else if (dum_hcd->active != dum_hcd->old_active &&
+ dum->ints_enabled) {
+ ++dum->callback_usage;
+ spin_unlock(&dum->lock);
if (dum_hcd->old_active && dum->driver->suspend)
dum->driver->suspend(&dum->gadget);
else if (!dum_hcd->old_active && dum->driver->resume)
dum->driver->resume(&dum->gadget);
+ spin_lock(&dum->lock);
+ --dum->callback_usage;
}
dum_hcd->old_status = dum_hcd->port_status;
@@ -972,8 +987,11 @@ static int dummy_udc_start(struct usb_gadget *g,
* can't enumerate without help from the driver we're binding.
*/
+ spin_lock_irq(&dum->lock);
dum->devstatus = 0;
dum->driver = driver;
+ dum->ints_enabled = 1;
+ spin_unlock_irq(&dum->lock);
return 0;
}
@@ -984,6 +1002,16 @@ static int dummy_udc_stop(struct usb_gadget *g)
struct dummy *dum = dum_hcd->dum;
spin_lock_irq(&dum->lock);
+ dum->ints_enabled = 0;
+ stop_activity(dum);
+
+ /* emulate synchronize_irq(): wait for callbacks to finish */
+ while (dum->callback_usage > 0) {
+ spin_unlock_irq(&dum->lock);
+ usleep_range(1000, 2000);
+ spin_lock_irq(&dum->lock);
+ }
+
dum->driver = NULL;
spin_unlock_irq(&dum->lock);
@@ -1037,7 +1065,12 @@ static int dummy_udc_probe(struct platform_device *pdev)
memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
dum->gadget.name = gadget_name;
dum->gadget.ops = &dummy_ops;
- dum->gadget.max_speed = USB_SPEED_SUPER;
+ if (mod_data.is_super_speed)
+ dum->gadget.max_speed = USB_SPEED_SUPER;
+ else if (mod_data.is_high_speed)
+ dum->gadget.max_speed = USB_SPEED_HIGH;
+ else
+ dum->gadget.max_speed = USB_SPEED_FULL;
dum->gadget.dev.parent = &pdev->dev;
init_dummy_udc_hw(dum);
@@ -1246,6 +1279,8 @@ static int dummy_urb_enqueue(
list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
urb->hcpriv = urbp;
+ if (!dum_hcd->next_frame_urbp)
+ dum_hcd->next_frame_urbp = urbp;
if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
urb->error_count = 1; /* mark as a new urb */
@@ -1521,6 +1556,8 @@ static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
dum->ss_hcd : dum->hs_hcd)))
return NULL;
+ if (!dum->ints_enabled)
+ return NULL;
if ((address & ~USB_DIR_IN) == 0)
return &dum->ep[0];
for (i = 1; i < DUMMY_ENDPOINTS; i++) {
@@ -1762,6 +1799,7 @@ static void dummy_timer(unsigned long _dum_hcd)
spin_unlock_irqrestore(&dum->lock, flags);
return;
}
+ dum_hcd->next_frame_urbp = NULL;
for (i = 0; i < DUMMY_ENDPOINTS; i++) {
if (!ep_info[i].name)
@@ -1778,6 +1816,10 @@ restart:
int type;
int status = -EINPROGRESS;
+ /* stop when we reach URBs queued after the timer interrupt */
+ if (urbp == dum_hcd->next_frame_urbp)
+ break;
+
urb = urbp->urb;
if (urb->unlinked)
goto return_urb;
@@ -1857,10 +1899,12 @@ restart:
* until setup() returns; no reentrancy issues etc.
*/
if (value > 0) {
+ ++dum->callback_usage;
spin_unlock(&dum->lock);
value = dum->driver->setup(&dum->gadget,
&setup);
spin_lock(&dum->lock);
+ --dum->callback_usage;
if (value >= 0) {
/* no delays (max 64KB data stage) */
@@ -2561,8 +2605,6 @@ static struct hc_driver dummy_hcd = {
.product_desc = "Dummy host controller",
.hcd_priv_size = sizeof(struct dummy_hcd),
- .flags = HCD_USB3 | HCD_SHARED,
-
.reset = dummy_setup,
.start = dummy_start,
.stop = dummy_stop,
@@ -2591,8 +2633,12 @@ static int dummy_hcd_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
dum = *((void **)dev_get_platdata(&pdev->dev));
- if (!mod_data.is_super_speed)
+ if (mod_data.is_super_speed)
+ dummy_hcd.flags = HCD_USB3 | HCD_SHARED;
+ else if (mod_data.is_high_speed)
dummy_hcd.flags = HCD_USB2;
+ else
+ dummy_hcd.flags = HCD_USB11;
hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
if (!hs_hcd)
return -ENOMEM;
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index df37c1e6e9d5..63a206122058 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -1038,7 +1038,7 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
usb3_ep->ep.maxpacket);
u8 *buf = usb3_req->req.buf + usb3_req->req.actual;
u32 tmp = 0;
- bool is_last;
+ bool is_last = !len ? true : false;
if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0)
return -EBUSY;
@@ -1059,7 +1059,8 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
usb3_write(usb3, tmp, fifo_reg);
}
- is_last = usb3_is_transfer_complete(usb3_ep, usb3_req);
+ if (!is_last)
+ is_last = usb3_is_transfer_complete(usb3_ep, usb3_req);
/* Send the data */
usb3_set_px_con_send(usb3_ep, len, is_last);
@@ -1150,7 +1151,8 @@ static void usb3_start_pipe0(struct renesas_usb3_ep *usb3_ep,
usb3_set_p0_con_for_ctrl_read_data(usb3);
} else {
usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD);
- usb3_set_p0_con_for_ctrl_write_data(usb3);
+ if (usb3_req->req.length)
+ usb3_set_p0_con_for_ctrl_write_data(usb3);
}
usb3_p0_xfer(usb3_ep, usb3_req);
@@ -2053,7 +2055,16 @@ static u32 usb3_calc_ramarea(int ram_size)
static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep,
const struct usb_endpoint_descriptor *desc)
{
- return usb3_ep->rammap_val | PN_RAMMAP_MPKT(usb_endpoint_maxp(desc));
+ int i;
+ const u32 max_packet_array[] = {8, 16, 32, 64, 512};
+ u32 mpkt = PN_RAMMAP_MPKT(1024);
+
+ for (i = 0; i < ARRAY_SIZE(max_packet_array); i++) {
+ if (usb_endpoint_maxp(desc) <= max_packet_array[i])
+ mpkt = PN_RAMMAP_MPKT(max_packet_array[i]);
+ }
+
+ return usb3_ep->rammap_val | mpkt;
}
static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep,
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 658d9d1f9ea3..6dda3623a276 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -447,7 +447,7 @@ static int usb_asmedia_wait_write(struct pci_dev *pdev)
if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
return 0;
- usleep_range(40, 60);
+ udelay(50);
}
dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
*
* Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
* It signals to the BIOS that the OS wants control of the host controller,
- * and then waits 5 seconds for the BIOS to hand over control.
+ * and then waits 1 second for the BIOS to hand over control.
* If we timeout, assume the BIOS is broken and take control anyway.
*/
static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
@@ -1069,9 +1069,9 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
if (val & XHCI_HC_BIOS_OWNED) {
writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
- /* Wait for 5 seconds with 10 microsecond polling interval */
+ /* Wait for 1 second with 10 microsecond polling interval */
timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
- 0, 5000, 10);
+ 0, 1000000, 10);
/* Assume a buggy BIOS and take HC ownership anyway */
if (timeout) {
@@ -1100,7 +1100,7 @@ hc_init:
* operational or runtime registers. Wait 5 seconds and no more.
*/
timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
- 5000, 10);
+ 5000000, 10);
/* Assume a buggy HC and start HC initialization anyway */
if (timeout) {
val = readl(op_reg_base + XHCI_STS_OFFSET);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index ad89a6d4111b..a2336deb5e36 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -112,7 +112,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
/* If PSI table exists, add the custom speed attributes from it */
if (usb3_1 && xhci->usb3_rhub.psi_count) {
- u32 ssp_cap_base, bm_attrib, psi;
+ u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
int offset;
ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
@@ -139,6 +139,15 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
psi = xhci->usb3_rhub.psi[i];
psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
+ psi_exp = XHCI_EXT_PORT_PSIE(psi);
+ psi_mant = XHCI_EXT_PORT_PSIM(psi);
+
+ /* Shift to Gbps and set SSP Link BIT(14) if 10Gbps */
+ for (; psi_exp < 3; psi_exp++)
+ psi_mant /= 1000;
+ if (psi_mant >= 10)
+ psi |= BIT(14);
+
if ((psi & PLT_MASK) == PLT_SYM) {
/* Symmetric, create SSA RX and TX from one PSI entry */
put_unaligned_le32(psi, &buf[offset]);
@@ -411,14 +420,25 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
GFP_NOWAIT);
if (!command) {
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_free_command(xhci, cmd);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto cmd_cleanup;
+ }
+
+ ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
+ i, suspend);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, command);
+ goto cmd_cleanup;
}
- xhci_queue_stop_endpoint(xhci, command, slot_id, i,
- suspend);
}
}
- xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+ ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto cmd_cleanup;
+ }
+
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -430,6 +450,8 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
ret = -ETIME;
}
+
+cmd_cleanup:
xhci_free_command(xhci, cmd);
return ret;
}
@@ -1506,9 +1528,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
t2 &= ~PORT_WKDISC_E;
}
- if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
- (hcd->speed < HCD_USB3))
- t2 &= ~PORT_WAKE_BITS;
} else
t2 &= ~PORT_WAKE_BITS;
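
In the BOS-descriptor hunk above, each PSI entry's mantissa is shifted into Gb/s using its exponent (0 = b/s, 1 = Kb/s, 2 = Mb/s, 3 = Gb/s), and entries that work out to 10 Gb/s or more get BIT(14), the Link Protocol bit, marking them as SuperSpeedPlus. The same conversion as a small, runnable userspace C program with made-up example values:

#include <stdio.h>
#include <stdint.h>

/* Convert a PSI mantissa/exponent pair to Gb/s: divide by 1000 per step below Gb/s. */
static uint32_t psi_to_gbps(uint32_t mant, uint32_t exp)
{
    for (; exp < 3; exp++)
        mant /= 1000;
    return mant;
}

int main(void)
{
    /* 5 Gb/s SuperSpeed entry: mantissa 5, exponent 3 (already Gb/s) */
    printf("%u Gb/s\n", psi_to_gbps(5, 3));
    /* 10 Gb/s SuperSpeedPlus entry: mantissa 10000, exponent 2 (Mb/s) */
    printf("%u Gb/s\n", psi_to_gbps(10000, 2));
    return 0;
}
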
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 8071c8fdd15e..76f392954733 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,11 +54,6 @@
#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
-#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
-#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
-#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
-#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
-
#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
static const char hcd_name[] = "xhci_hcd";
@@ -142,13 +137,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_AMD)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
- if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
- ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
- (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
- (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
- (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
- xhci->quirks |= XHCI_U2_DISABLE_WAKE;
-
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_INTEL_HOST;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 163bafde709f..1cb6eaef4ae1 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -178,14 +178,18 @@ static int xhci_plat_probe(struct platform_device *pdev)
* 2. xhci_plat is child of a device from firmware (dwc3-plat)
* 3. xhci_plat is grandchild of a pci device (dwc3-pci)
*/
- sysdev = &pdev->dev;
- if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node)
- sysdev = sysdev->parent;
+ for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) {
+ if (is_of_node(sysdev->fwnode) ||
+ is_acpi_device_node(sysdev->fwnode))
+ break;
#ifdef CONFIG_PCI
- else if (sysdev->parent && sysdev->parent->parent &&
- sysdev->parent->parent->bus == &pci_bus_type)
- sysdev = sysdev->parent->parent;
+ else if (sysdev->bus == &pci_bus_type)
+ break;
#endif
+ }
+
+ if (!sysdev)
+ sysdev = &pdev->dev;
/* Try to set 64-bit DMA first */
if (WARN_ON(!sysdev->dma_mask))
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a9443651ce0f..82c746e2d85c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1309,6 +1309,7 @@ static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
struct xhci_command *cur_cmd, *tmp_cmd;
+ xhci->current_cmd = NULL;
list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}
@@ -2579,15 +2580,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
(struct xhci_generic_trb *) ep_trb);
/*
- * No-op TRB should not trigger interrupts.
- * If ep_trb is a no-op TRB, it means the
- * corresponding TD has been cancelled. Just ignore
- * the TD.
+ * No-op TRB could trigger interrupts in a case where
+ * a URB was killed and a STALL_ERROR happens right
+ * after the endpoint ring stopped. Reset the halted
+ * endpoint. Otherwise, the endpoint remains stalled
+ * indefinitely.
*/
if (trb_is_noop(ep_trb)) {
- xhci_dbg(xhci,
- "ep_trb is a no-op TRB. Skip it for slot %u ep %u\n",
- slot_id, ep_index);
+ if (trb_comp_code == COMP_STALL_ERROR ||
+ xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
+ trb_comp_code))
+ xhci_cleanup_halted_endpoint(xhci, slot_id,
+ ep_index,
+ ep_ring->stream_id,
+ td, ep_trb,
+ EP_HARD_RESET);
goto cleanup;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index b2ff1ff1a02f..51535ba2bcd4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1703,7 +1703,8 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
if (xhci->quirks & XHCI_MTK_HOST) {
ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
if (ret < 0) {
- xhci_free_endpoint_ring(xhci, virt_dev, ep_index);
+ xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
+ virt_dev->eps[ep_index].new_ring = NULL;
return ret;
}
}
@@ -4804,7 +4805,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
*/
hcd->has_tt = 1;
} else {
- if (xhci->sbrn == 0x31) {
+ /* Some 3.1 hosts return sbrn 0x30, can't rely on sbrn alone */
+ if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) {
xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
hcd->speed = HCD_USB31;
hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 2abaa4d6d39d..2b48aa4f6b76 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -735,6 +735,8 @@ struct xhci_ep_ctx {
#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
#define EP_HAS_LSA (1 << 15)
+/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */
+#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p) (((p) >> 24) & 0xff)
/* ep_info2 bitmasks */
/*
@@ -1681,7 +1683,7 @@ struct xhci_bus_state {
static inline unsigned int hcd_index(struct usb_hcd *hcd)
{
- if (hcd->speed == HCD_USB3)
+ if (hcd->speed >= HCD_USB3)
return 0;
else
return 1;
@@ -1826,7 +1828,7 @@ struct xhci_hcd {
/* For controller with a broken Port Disable implementation */
#define XHCI_BROKEN_PORT_PED (1 << 25)
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
-#define XHCI_U2_DISABLE_WAKE (1 << 27)
+/* Reserved. It was XHCI_U2_DISABLE_WAKE */
#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
unsigned int num_active_eps;
@@ -2540,8 +2542,8 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq,
u8 lsa;
u8 hid;
- esit = EP_MAX_ESIT_PAYLOAD_HI(info) << 16 |
- EP_MAX_ESIT_PAYLOAD_LO(tx_info);
+ esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
+ CTX_TO_MAX_ESIT_PAYLOAD(tx_info);
ep_state = info & EP_STATE_MASK;
max_pstr = info & EP_MAXPSTREAMS_MASK;
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index eee82ca55b7b..b3fc602b2e24 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -202,12 +202,13 @@ found:
return tmp;
}
- if (in) {
+ if (in)
dev->in_pipe = usb_rcvbulkpipe(udev,
in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ if (out)
dev->out_pipe = usb_sndbulkpipe(udev,
out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
- }
+
if (iso_in) {
dev->iso_in = &iso_in->desc;
dev->in_iso_pipe = usb_rcvisocpipe(udev,
@@ -1964,6 +1965,9 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param,
int status = 0;
struct urb *urbs[param->sglen];
+ if (!param->sglen || param->iterations > UINT_MAX / param->sglen)
+ return -EINVAL;
+
memset(&context, 0, sizeof(context));
context.count = param->iterations * param->sglen;
context.dev = dev;
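
test_queue() now rejects a zero sglen and refuses an iterations value whose product with sglen would overflow the 32-bit completion counter. The overflow test written as a small runnable C program; the numbers are only illustrative:

#include <stdint.h>
#include <stdio.h>

/* Return 1 if a * b would overflow a 32-bit unsigned counter. */
static int mul_would_overflow(uint32_t a, uint32_t b)
{
    if (b == 0)
        return 1;   /* also reject a zero multiplier, as the patch does */
    return a > UINT32_MAX / b;
}

int main(void)
{
    printf("%d\n", mul_would_overflow(100, 32));        /* 0: fits */
    printf("%d\n", mul_would_overflow(200000000, 32));  /* 1: overflows */
    return 0;
}
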
@@ -2087,6 +2091,8 @@ usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param)
if (param->iterations <= 0)
return -EINVAL;
+ if (param->sglen > MAX_SGLEN)
+ return -EINVAL;
/*
* Just a bunch of test cases that every HCD is expected to handle.
*
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 029692053dd3..ff5a1a8989d5 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -906,7 +906,7 @@ b_host:
*/
if (int_usb & MUSB_INTR_RESET) {
handled = IRQ_HANDLED;
- if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_active(musb)) {
/*
* When BABBLE happens what we can depends on which
* platform MUSB is running, because some platforms
@@ -916,9 +916,7 @@ b_host:
* drop the session.
*/
dev_err(musb->controller, "Babble\n");
-
- if (is_host_active(musb))
- musb_recover_from_babble(musb);
+ musb_recover_from_babble(musb);
} else {
musb_dbg(musb, "BUS RESET as %s",
usb_otg_state_string(musb->xceiv->otg->state));
@@ -1861,22 +1859,22 @@ static void musb_pm_runtime_check_session(struct musb *musb)
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
case MUSB_QUIRK_B_INVALID_VBUS_91:
- if (musb->quirk_retries--) {
+ if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
"Poll devctl on invalid vbus, assume no session");
schedule_delayed_work(&musb->irq_work,
msecs_to_jiffies(1000));
-
+ musb->quirk_retries--;
return;
}
/* fall through */
case MUSB_QUIRK_A_DISCONNECT_19:
- if (musb->quirk_retries--) {
+ if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
"Poll devctl on possible host mode disconnect");
schedule_delayed_work(&musb->irq_work,
msecs_to_jiffies(1000));
-
+ musb->quirk_retries--;
return;
}
if (!musb->session)
@@ -2681,8 +2679,15 @@ static int musb_suspend(struct device *dev)
musb_platform_disable(musb);
musb_disable_interrupts(musb);
+
+ musb->flush_irq_work = true;
+ while (flush_delayed_work(&musb->irq_work))
+ ;
+ musb->flush_irq_work = false;
+
if (!(musb->io.quirks & MUSB_PRESERVE_SESSION))
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+
WARN_ON(!list_empty(&musb->pending_list));
spin_lock_irqsave(&musb->lock, flags);
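
musb_suspend() sets flush_irq_work and loops on flush_delayed_work() because irq_work can re-arm itself; the quirk handler checks the flag so it stops rescheduling while suspend drains it. A bare-bones sketch of that handshake with invented demo_* names:

/* Sketch only: a real driver would serialise the flag with its own lock. */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_ctx {
    struct delayed_work irq_work;
    bool flushing;    /* set while suspend is draining the work */
};

static void demo_irq_work(struct work_struct *work)
{
    struct demo_ctx *ctx = container_of(to_delayed_work(work),
                                        struct demo_ctx, irq_work);

    if (!ctx->flushing)   /* don't re-arm while being flushed */
        schedule_delayed_work(&ctx->irq_work, msecs_to_jiffies(1000));
}

static void demo_suspend(struct demo_ctx *ctx)
{
    ctx->flushing = true;
    /* flush repeatedly: each pass may have been re-armed by an older run */
    while (flush_delayed_work(&ctx->irq_work))
        ;
    ctx->flushing = false;
}
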
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index c748f4ac1154..20f4614178d9 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -428,6 +428,8 @@ struct musb {
unsigned test_mode:1;
unsigned softconnect:1;
+ unsigned flush_irq_work:1;
+
u8 address;
u8 test_mode_nr;
u16 ackpend; /* ep0 */
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index ba255280a624..1ec0a4947b6b 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -26,15 +26,28 @@
#define MUSB_DMA_NUM_CHANNELS 15
+#define DA8XX_USB_MODE 0x10
+#define DA8XX_USB_AUTOREQ 0x14
+#define DA8XX_USB_TEARDOWN 0x1c
+
+#define DA8XX_DMA_NUM_CHANNELS 4
+
struct cppi41_dma_controller {
struct dma_controller controller;
- struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
- struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
+ struct cppi41_dma_channel *rx_channel;
+ struct cppi41_dma_channel *tx_channel;
struct hrtimer early_tx;
struct list_head early_tx_list;
u32 rx_mode;
u32 tx_mode;
u32 auto_req;
+
+ u32 tdown_reg;
+ u32 autoreq_reg;
+
+ void (*set_dma_mode)(struct cppi41_dma_channel *cppi41_channel,
+ unsigned int mode);
+ u8 num_channels;
};
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
@@ -349,6 +362,32 @@ static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
}
}
+static void da8xx_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
+ unsigned int mode)
+{
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct musb *musb = controller->controller.musb;
+ unsigned int shift;
+ u32 port;
+ u32 new_mode;
+ u32 old_mode;
+
+ old_mode = controller->tx_mode;
+ port = cppi41_channel->port_num;
+
+ shift = (port - 1) * 4;
+ if (!cppi41_channel->is_tx)
+ shift += 16;
+ new_mode = old_mode & ~(3 << shift);
+ new_mode |= mode << shift;
+
+ if (new_mode == old_mode)
+ return;
+ controller->tx_mode = new_mode;
+ musb_writel(musb->ctrl_base, DA8XX_USB_MODE, new_mode);
+}
+
+
static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
unsigned mode)
{
@@ -364,8 +403,8 @@ static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
if (new_mode == old_mode)
return;
controller->auto_req = new_mode;
- musb_writel(controller->controller.musb->ctrl_base, USB_CTRL_AUTOREQ,
- new_mode);
+ musb_writel(controller->controller.musb->ctrl_base,
+ controller->autoreq_reg, new_mode);
}
static bool cppi41_configure_channel(struct dma_channel *channel,
@@ -373,6 +412,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
dma_addr_t dma_addr, u32 len)
{
struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
struct dma_chan *dc = cppi41_channel->dc;
struct dma_async_tx_descriptor *dma_desc;
enum dma_transfer_direction direction;
@@ -398,7 +438,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
musb_writel(musb->ctrl_base,
RNDIS_REG(cppi41_channel->port_num), len);
/* gen rndis */
- cppi41_set_dma_mode(cppi41_channel,
+ controller->set_dma_mode(cppi41_channel,
EP_MODE_DMA_GEN_RNDIS);
/* auto req */
@@ -407,14 +447,15 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
} else {
musb_writel(musb->ctrl_base,
RNDIS_REG(cppi41_channel->port_num), 0);
- cppi41_set_dma_mode(cppi41_channel,
+ controller->set_dma_mode(cppi41_channel,
EP_MODE_DMA_TRANSPARENT);
cppi41_set_autoreq_mode(cppi41_channel,
EP_MODE_AUTOREQ_NONE);
}
} else {
/* fallback mode */
- cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
+ controller->set_dma_mode(cppi41_channel,
+ EP_MODE_DMA_TRANSPARENT);
cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
len = min_t(u32, packet_sz, len);
}
@@ -445,7 +486,7 @@ static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
struct cppi41_dma_channel *cppi41_channel = NULL;
u8 ch_num = hw_ep->epnum - 1;
- if (ch_num >= MUSB_DMA_NUM_CHANNELS)
+ if (ch_num >= controller->num_channels)
return NULL;
if (is_tx)
@@ -581,12 +622,13 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
do {
if (is_tx)
- musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+ musb_writel(musb->ctrl_base, controller->tdown_reg,
+ tdbit);
ret = dmaengine_terminate_all(cppi41_channel->dc);
} while (ret == -EAGAIN);
if (is_tx) {
- musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+ musb_writel(musb->ctrl_base, controller->tdown_reg, tdbit);
csr = musb_readw(epio, MUSB_TXCSR);
if (csr & MUSB_TXCSR_TXPKTRDY) {
@@ -604,7 +646,7 @@ static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
struct dma_chan *dc;
int i;
- for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
+ for (i = 0; i < ctrl->num_channels; i++) {
dc = ctrl->tx_channel[i].dc;
if (dc)
dma_release_channel(dc);
@@ -656,7 +698,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
goto err;
ret = -EINVAL;
- if (port > MUSB_DMA_NUM_CHANNELS || !port)
+ if (port > controller->num_channels || !port)
goto err;
if (is_tx)
cppi41_channel = &controller->tx_channel[port - 1];
@@ -697,6 +739,8 @@ void cppi41_dma_controller_destroy(struct dma_controller *c)
hrtimer_cancel(&controller->early_tx);
cppi41_dma_controller_stop(controller);
+ kfree(controller->rx_channel);
+ kfree(controller->tx_channel);
kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);
@@ -705,6 +749,7 @@ struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
struct cppi41_dma_controller *controller;
+ int channel_size;
int ret = 0;
if (!musb->controller->parent->of_node) {
@@ -727,12 +772,37 @@ cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
controller->controller.is_compatible = cppi41_is_compatible;
controller->controller.musb = musb;
+ if (musb->io.quirks & MUSB_DA8XX) {
+ controller->tdown_reg = DA8XX_USB_TEARDOWN;
+ controller->autoreq_reg = DA8XX_USB_AUTOREQ;
+ controller->set_dma_mode = da8xx_set_dma_mode;
+ controller->num_channels = DA8XX_DMA_NUM_CHANNELS;
+ } else {
+ controller->tdown_reg = USB_TDOWN;
+ controller->autoreq_reg = USB_CTRL_AUTOREQ;
+ controller->set_dma_mode = cppi41_set_dma_mode;
+ controller->num_channels = MUSB_DMA_NUM_CHANNELS;
+ }
+
+ channel_size = controller->num_channels *
+ sizeof(struct cppi41_dma_channel);
+ controller->rx_channel = kzalloc(channel_size, GFP_KERNEL);
+ if (!controller->rx_channel)
+ goto rx_channel_alloc_fail;
+ controller->tx_channel = kzalloc(channel_size, GFP_KERNEL);
+ if (!controller->tx_channel)
+ goto tx_channel_alloc_fail;
+
ret = cppi41_dma_controller_start(controller);
if (ret)
goto plat_get_fail;
return &controller->controller;
plat_get_fail:
+ kfree(controller->tx_channel);
+tx_channel_alloc_fail:
+ kfree(controller->rx_channel);
+rx_channel_alloc_fail:
kfree(controller);
kzalloc_fail:
if (ret == -EPROBE_DEFER)
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index c9a09b5bb6e5..dc353e24d53c 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -297,6 +297,8 @@ static int sunxi_musb_exit(struct musb *musb)
if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
sunxi_sram_release(musb->controller->parent);
+ devm_usb_put_phy(glue->dev, glue->xceiv);
+
return 0;
}
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 5fe4a5704bde..ccc2bf5274b4 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -329,6 +329,14 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
unsigned long val;
void __iomem *base = phy->regs;
+ /*
+ * The USB driver may have already initiated the phy clock
+ * disable so wait to see if the clock turns off and if not
+ * then proceed with gating the clock.
+ */
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) == 0)
+ return;
+
if (phy->is_legacy_phy) {
val = readl(base + USB_SUSP_CTRL);
val |= USB_SUSP_SET;
@@ -351,6 +359,15 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
unsigned long val;
void __iomem *base = phy->regs;
+ /*
+ * The USB driver may have already initiated the phy clock
+ * enable so wait to see if the clock turns on and if not
+ * then proceed with ungating the clock.
+ */
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
+ USB_PHY_CLK_VALID) == 0)
+ return;
+
if (phy->is_legacy_phy) {
val = readl(base + USB_SUSP_CTRL);
val |= USB_SUSP_CLR;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index d1af831f43eb..50285b01da92 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -282,11 +282,26 @@ static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
struct usbhs_fifo *fifo)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ int ret = 0;
- if (!usbhs_pipe_is_dcp(pipe))
- usbhsf_fifo_barrier(priv, fifo);
+ if (!usbhs_pipe_is_dcp(pipe)) {
+ /*
+ * This driver checks the pipe condition first to avoid -EBUSY
+ * from usbhsf_fifo_barrier() with about 10 msec delay in
+ * the interrupt handler if the pipe is RX direction and empty.
+ */
+ if (usbhs_pipe_is_dir_in(pipe))
+ ret = usbhs_pipe_is_accessible(pipe);
+ if (!ret)
+ ret = usbhsf_fifo_barrier(priv, fifo);
+ }
- usbhs_write(priv, fifo->ctr, BCLR);
+ /*
+ * if non-DCP pipe, this driver should set BCLR when
+ * usbhsf_fifo_barrier() returns 0.
+ */
+ if (!ret)
+ usbhs_write(priv, fifo->ctr, BCLR);
}
static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
@@ -842,9 +857,9 @@ static void xfer_work(struct work_struct *work)
fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
usbhs_pipe_running(pipe, 1);
- usbhsf_dma_start(pipe, fifo);
usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
dma_async_issue_pending(chan);
+ usbhsf_dma_start(pipe, fifo);
usbhs_pipe_enable(pipe);
xfer_work_end:
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index fdf89800ebc3..43a862a90a77 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -186,6 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
tty_kref_put(tty);
reset_open_count:
port->port.count = 0;
+ info->port = NULL;
usb_autopm_put_interface(serial->interface);
error_get_interface:
usb_serial_put(serial);
@@ -265,7 +266,7 @@ static struct console usbcons = {
void usb_serial_console_disconnect(struct usb_serial *serial)
{
- if (serial->port[0] == usbcons_info.port) {
+ if (serial->port[0] && serial->port[0] == usbcons_info.port) {
usb_serial_console_exit();
usb_serial_put(serial);
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 2d945c9f975c..412f812522ee 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
@@ -352,6 +353,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
#define CP210X_PARTNUM_CP2104 0x04
#define CP210X_PARTNUM_CP2105 0x05
#define CP210X_PARTNUM_CP2108 0x08
+#define CP210X_PARTNUM_UNKNOWN 0xFF
/* CP210X_GET_COMM_STATUS returns these 0x13 bytes */
struct cp210x_comm_status {
@@ -1491,8 +1493,11 @@ static int cp210x_attach(struct usb_serial *serial)
result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
CP210X_GET_PARTNUM, &priv->partnum,
sizeof(priv->partnum));
- if (result < 0)
- goto err_free_priv;
+ if (result < 0) {
+ dev_warn(&serial->interface->dev,
+ "querying part number failed\n");
+ priv->partnum = CP210X_PARTNUM_UNKNOWN;
+ }
usb_set_serial_data(serial, priv);
@@ -1505,10 +1510,6 @@ static int cp210x_attach(struct usb_serial *serial)
}
return 0;
-err_free_priv:
- kfree(priv);
-
- return result;
}
static void cp210x_disconnect(struct usb_serial *serial)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1cec03799cdf..49d1b2d4606d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1015,6 +1015,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
{ USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
+ { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4fcf1cecb6d7..f9d15bd62785 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -610,6 +610,13 @@
#define ADI_GNICEPLUS_PID 0xF001
/*
+ * Cypress WICED USB UART
+ */
+#define CYPRESS_VID 0x04B4
+#define CYPRESS_WICED_BT_USB_PID 0x009B
+#define CYPRESS_WICED_WL_USB_PID 0xF900
+
+/*
* Microchip Technology, Inc.
*
* MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index cc84da8dbb84..14511d6a7d44 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -45,6 +45,7 @@ struct metrousb_private {
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) }, /* MS7820 */
{ }, /* Terminating entry. */
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 54bfef13966a..ba672cf4e888 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -522,6 +522,7 @@ static void option_instat_callback(struct urb *urb);
/* TP-LINK Incorporated products */
#define TPLINK_VENDOR_ID 0x2357
+#define TPLINK_PRODUCT_LTE 0x000D
#define TPLINK_PRODUCT_MA180 0x0201
/* Changhong products */
@@ -2011,6 +2012,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index ebc0beea69d6..eb9928963a53 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -174,6 +174,10 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
+ {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
+ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
+ {DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 1a59f335b063..a3ccb899df60 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -834,13 +834,25 @@ Retry_Sense:
if (result == USB_STOR_TRANSPORT_GOOD) {
srb->result = SAM_STAT_GOOD;
srb->sense_buffer[0] = 0x0;
+ }
+
+ /*
+ * ATA-passthru commands use sense data to report
+ * the command completion status, and often devices
+ * return Check Condition status when nothing is
+ * wrong.
+ */
+ else if (srb->cmnd[0] == ATA_16 ||
+ srb->cmnd[0] == ATA_12) {
+ /* leave the data alone */
+ }
/*
* If there was a problem, report an unspecified
* hardware error to prevent the higher layers from
* entering an infinite retry loop.
*/
- } else {
+ else {
srb->result = DID_ERROR << 16;
if ((sshdr.response_code & 0x72) == 0x72)
srb->sense_buffer[1] = HARDWARE_ERROR;
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index f58caa9e6a27..a155cd02bce2 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -9,7 +9,8 @@ static int uas_is_interface(struct usb_host_interface *intf)
intf->desc.bInterfaceProtocol == USB_PR_UAS);
}
-static int uas_find_uas_alt_setting(struct usb_interface *intf)
+static struct usb_host_interface *uas_find_uas_alt_setting(
+ struct usb_interface *intf)
{
int i;
@@ -17,10 +18,10 @@ static int uas_find_uas_alt_setting(struct usb_interface *intf)
struct usb_host_interface *alt = &intf->altsetting[i];
if (uas_is_interface(alt))
- return alt->desc.bAlternateSetting;
+ return alt;
}
- return -ENODEV;
+ return NULL;
}
static int uas_find_endpoints(struct usb_host_interface *alt,
@@ -58,14 +59,14 @@ static int uas_use_uas_driver(struct usb_interface *intf,
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
unsigned long flags = id->driver_info;
- int r, alt;
-
+ struct usb_host_interface *alt;
+ int r;
alt = uas_find_uas_alt_setting(intf);
- if (alt < 0)
+ if (!alt)
return 0;
- r = uas_find_endpoints(&intf->altsetting[alt], eps);
+ r = uas_find_endpoints(alt, eps);
if (r < 0)
return 0;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index cfb1e3bbd434..63cf981ed81c 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -873,14 +873,14 @@ MODULE_DEVICE_TABLE(usb, uas_usb_ids);
static int uas_switch_interface(struct usb_device *udev,
struct usb_interface *intf)
{
- int alt;
+ struct usb_host_interface *alt;
alt = uas_find_uas_alt_setting(intf);
- if (alt < 0)
- return alt;
+ if (!alt)
+ return -ENODEV;
- return usb_set_interface(udev,
- intf->altsetting[0].desc.bInterfaceNumber, alt);
+ return usb_set_interface(udev, alt->desc.bInterfaceNumber,
+ alt->desc.bAlternateSetting);
}
static int uas_configure_endpoints(struct uas_dev_info *devinfo)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 5a70c33ef0e0..eb06d88b41d6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1459,6 +1459,13 @@ UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_SANE_SENSE ),
+/* Reported by Kris Lindgren <kris.lindgren@gmail.com> */
+UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999,
+ "Seagate",
+ "External",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_WP_DETECT ),
+
UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999,
"Maxtor",
"USB to SATA",
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 35a1e777b449..9a53912bdfe9 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -825,6 +825,8 @@ static int hwarc_probe(struct usb_interface *iface,
if (iface->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
+ if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc))
+ return -ENODEV;
result = -ENOMEM;
uwb_rc = uwb_rc_alloc();
diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c
index 01c20a260a8b..39dd4ef53c77 100644
--- a/drivers/uwb/uwbd.c
+++ b/drivers/uwb/uwbd.c
@@ -302,18 +302,22 @@ static int uwbd(void *param)
/** Start the UWB daemon */
void uwbd_start(struct uwb_rc *rc)
{
- rc->uwbd.task = kthread_run(uwbd, rc, "uwbd");
- if (rc->uwbd.task == NULL)
+ struct task_struct *task = kthread_run(uwbd, rc, "uwbd");
+ if (IS_ERR(task)) {
+ rc->uwbd.task = NULL;
printk(KERN_ERR "UWB: Cannot start management daemon; "
"UWB won't work\n");
- else
+ } else {
+ rc->uwbd.task = task;
rc->uwbd.pid = rc->uwbd.task->pid;
+ }
}
/* Stop the UWB daemon and free any unprocessed events */
void uwbd_stop(struct uwb_rc *rc)
{
- kthread_stop(rc->uwbd.task);
+ if (rc->uwbd.task)
+ kthread_stop(rc->uwbd.task);
uwbd_flush(rc);
}
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index 5fbfd9cfb6d6..5b3d57fc82d3 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -169,6 +169,9 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
{
struct pci_bar_info *bar = data;
+ unsigned int pos = (offset - PCI_BASE_ADDRESS_0) / 4;
+ const struct resource *res = dev->resource;
+ u32 mask;
if (unlikely(!bar)) {
pr_warn(DRV_NAME ": driver data not found for %s\n",
@@ -179,7 +182,13 @@ static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
/* A write to obtain the length must happen as a 32-bit write.
* This does not (yet) support writing individual bytes
*/
- if (value == ~0)
+ if (res[pos].flags & IORESOURCE_IO)
+ mask = ~PCI_BASE_ADDRESS_IO_MASK;
+ else if (pos && (res[pos - 1].flags & IORESOURCE_MEM_64))
+ mask = 0;
+ else
+ mask = ~PCI_BASE_ADDRESS_MEM_MASK;
+ if ((value | mask) == ~0U)
bar->which = 1;
else {
u32 tmpval;
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 82a8866758ee..a1c17000129b 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -519,64 +519,6 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
return err;
}
-static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
- grant_ref_t *gnt_refs,
- unsigned int nr_grefs,
- void **vaddr)
-{
- struct xenbus_map_node *node;
- struct vm_struct *area;
- pte_t *ptes[XENBUS_MAX_RING_GRANTS];
- phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
- int err = GNTST_okay;
- int i;
- bool leaked;
-
- *vaddr = NULL;
-
- if (nr_grefs > XENBUS_MAX_RING_GRANTS)
- return -EINVAL;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
- area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
- if (!area) {
- kfree(node);
- return -ENOMEM;
- }
-
- for (i = 0; i < nr_grefs; i++)
- phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
-
- err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
- phys_addrs,
- GNTMAP_host_map | GNTMAP_contains_pte,
- &leaked);
- if (err)
- goto failed;
-
- node->nr_handles = nr_grefs;
- node->pv.area = area;
-
- spin_lock(&xenbus_valloc_lock);
- list_add(&node->next, &xenbus_valloc_pages);
- spin_unlock(&xenbus_valloc_lock);
-
- *vaddr = area->addr;
- return 0;
-
-failed:
- if (!leaked)
- free_vm_area(area);
- else
- pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
-
- kfree(node);
- return err;
-}
-
struct map_ring_valloc_hvm
{
unsigned int idx;
@@ -725,6 +667,65 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
+#ifdef CONFIG_XEN_PV
+static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
+ grant_ref_t *gnt_refs,
+ unsigned int nr_grefs,
+ void **vaddr)
+{
+ struct xenbus_map_node *node;
+ struct vm_struct *area;
+ pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+ phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+ int err = GNTST_okay;
+ int i;
+ bool leaked;
+
+ *vaddr = NULL;
+
+ if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+ return -EINVAL;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
+ if (!area) {
+ kfree(node);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_grefs; i++)
+ phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
+
+ err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
+ phys_addrs,
+ GNTMAP_host_map | GNTMAP_contains_pte,
+ &leaked);
+ if (err)
+ goto failed;
+
+ node->nr_handles = nr_grefs;
+ node->pv.area = area;
+
+ spin_lock(&xenbus_valloc_lock);
+ list_add(&node->next, &xenbus_valloc_pages);
+ spin_unlock(&xenbus_valloc_lock);
+
+ *vaddr = area->addr;
+ return 0;
+
+failed:
+ if (!leaked)
+ free_vm_area(area);
+ else
+ pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
+
+ kfree(node);
+ return err;
+}
+
static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
struct xenbus_map_node *node;
@@ -788,6 +789,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
return err;
}
+static const struct xenbus_ring_ops ring_ops_pv = {
+ .map = xenbus_map_ring_valloc_pv,
+ .unmap = xenbus_unmap_ring_vfree_pv,
+};
+#endif
+
struct unmap_ring_vfree_hvm
{
unsigned int idx;
@@ -916,11 +923,6 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
-static const struct xenbus_ring_ops ring_ops_pv = {
- .map = xenbus_map_ring_valloc_pv,
- .unmap = xenbus_unmap_ring_vfree_pv,
-};
-
static const struct xenbus_ring_ops ring_ops_hvm = {
.map = xenbus_map_ring_valloc_hvm,
.unmap = xenbus_unmap_ring_vfree_hvm,
@@ -928,8 +930,10 @@ static const struct xenbus_ring_ops ring_ops_hvm = {
void __init xenbus_ring_ops_init(void)
{
+#ifdef CONFIG_XEN_PV
if (!xen_feature(XENFEAT_auto_translated_physmap))
ring_ops = &ring_ops_pv;
else
+#endif
ring_ops = &ring_ops_hvm;
}