Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/processor_idle.c11
-rw-r--r--drivers/acpi/scan.c13
-rw-r--r--drivers/android/binder.c10
-rw-r--r--drivers/ata/Kconfig6
-rw-r--r--drivers/ata/ahci.c7
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_brcm.c7
-rw-r--r--drivers/ata/libata-core.c328
-rw-r--r--drivers/ata/libata-sata.c25
-rw-r--r--drivers/ata/libata-scsi.c46
-rw-r--r--drivers/ata/libata-sff.c2
-rw-r--r--drivers/ata/libata.h13
-rw-r--r--drivers/ata/pata_ftide010.c7
-rw-r--r--drivers/ata/pata_marvell.c2
-rw-r--r--drivers/ata/pata_mpc52xx.c3
-rw-r--r--drivers/ata/pata_sil680.c30
-rw-r--r--drivers/ata/pata_via.c6
-rw-r--r--drivers/ata/sata_dwc_460ex.c6
-rw-r--r--drivers/ata/sata_gemini.c7
-rw-r--r--drivers/base/arch_topology.c11
-rw-r--r--drivers/base/dd.c1
-rw-r--r--drivers/base/firmware_loader/main.c17
-rw-r--r--drivers/base/topology.c10
-rw-r--r--drivers/block/Kconfig16
-rw-r--r--drivers/block/aoe/aoe.h2
-rw-r--r--drivers/block/aoe/aoeblk.c2
-rw-r--r--drivers/block/aoe/aoecmd.c2
-rw-r--r--drivers/block/aoe/aoedev.c4
-rw-r--r--drivers/block/aoe/aoemain.c10
-rw-r--r--drivers/block/ataflop.c10
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/drbd/drbd_int.h8
-rw-r--r--drivers/block/drbd/drbd_main.c69
-rw-r--r--drivers/block/drbd/drbd_nl.c168
-rw-r--r--drivers/block/drbd/drbd_receiver.c28
-rw-r--r--drivers/block/drbd/drbd_req.c2
-rw-r--r--drivers/block/drbd/drbd_state.c21
-rw-r--r--drivers/block/drbd/drbd_state_change.h8
-rw-r--r--drivers/block/drbd/drbd_worker.c2
-rw-r--r--drivers/block/floppy.c61
-rw-r--r--drivers/block/loop.c393
-rw-r--r--drivers/block/loop.h72
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c2
-rw-r--r--drivers/block/nbd.c37
-rw-r--r--drivers/block/null_blk/main.c95
-rw-r--r--drivers/block/null_blk/null_blk.h8
-rw-r--r--drivers/block/null_blk/zoned.c7
-rw-r--r--drivers/block/pktcdvd.c36
-rw-r--r--drivers/block/rbd.c1
-rw-r--r--drivers/block/rnbd/rnbd-clt.c20
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.h10
-rw-r--r--drivers/block/rnbd/rnbd-srv.c5
-rw-r--r--drivers/block/virtio_blk.c9
-rw-r--r--drivers/block/xen-blkback/blkback.c15
-rw-r--r--drivers/block/xen-blkback/xenbus.c14
-rw-r--r--drivers/block/xen-blkfront.c8
-rw-r--r--drivers/block/zram/zram_drv.c35
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-msi.c6
-rw-r--r--drivers/bus/imx-weim.c5
-rw-r--r--drivers/bus/mhi/host/pci_generic.c2
-rw-r--r--drivers/bus/sunxi-rsb.c2
-rw-r--r--drivers/bus/ti-sysc.c16
-rw-r--r--drivers/cdrom/cdrom.c41
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c7
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c5
-rw-r--r--drivers/char/random.c113
-rw-r--r--drivers/clk/at91/clk-generated.c4
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c1
-rw-r--r--drivers/clk/microchip/clk-mpfs.c195
-rw-r--r--drivers/clk/qcom/clk-rcg2.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-rtc.c17
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c2
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c70
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c4
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c1
-rw-r--r--drivers/crypto/qcom-rng.c1
-rw-r--r--drivers/cxl/pci.c1
-rw-r--r--drivers/dma-buf/Makefile1
-rw-r--r--drivers/dma-buf/dma-buf.c16
-rw-r--r--drivers/dma-buf/dma-fence-array.c32
-rw-r--r--drivers/dma-buf/selftests.h1
-rw-r--r--drivers/dma-buf/st-dma-fence-unwrap.c261
-rw-r--r--drivers/dma-buf/sync_file.c141
-rw-r--r--drivers/dma/at_xdmac.c12
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.c16
-rw-r--r--drivers/dma/idxd/device.c6
-rw-r--r--drivers/dma/idxd/submit.c5
-rw-r--r--drivers/dma/idxd/sysfs.c6
-rw-r--r--drivers/dma/imx-sdma.c32
-rw-r--r--drivers/dma/mediatek/mtk-uart-apdma.c9
-rw-r--r--drivers/edac/Kconfig3
-rw-r--r--drivers/edac/armada_xp_edac.c18
-rw-r--r--drivers/edac/dmc520_edac.c2
-rw-r--r--drivers/edac/edac_device.c130
-rw-r--r--drivers/edac/edac_device.h14
-rw-r--r--drivers/edac/edac_device_sysfs.c5
-rw-r--r--drivers/edac/edac_mc.c96
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/edac/edac_pci.c25
-rw-r--r--drivers/edac/ghes_edac.c202
-rw-r--r--drivers/edac/i5100_edac.c5
-rw-r--r--drivers/edac/mpc85xx_edac.c14
-rw-r--r--drivers/edac/synopsys_edac.c31
-rw-r--r--drivers/edac/xgene_edac.c2
-rw-r--r--drivers/firewire/core-card.c3
-rw-r--r--drivers/firewire/core-cdev.c4
-rw-r--r--drivers/firewire/core-topology.c9
-rw-r--r--drivers/firewire/core-transaction.c30
-rw-r--r--drivers/firewire/sbp2.c13
-rw-r--r--drivers/firmware/arm_scmi/clock.c5
-rw-r--r--drivers/firmware/arm_scmi/driver.c3
-rw-r--r--drivers/firmware/arm_scmi/optee.c8
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c3
-rw-r--r--drivers/firmware/efi/Kconfig43
-rw-r--r--drivers/firmware/efi/cper.c64
-rw-r--r--drivers/firmware/efi/efi.c13
-rw-r--r--drivers/firmware/efi/libstub/arm32-stub.c3
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c15
-rw-r--r--drivers/firmware/efi/libstub/efi-stub.c2
-rw-r--r--drivers/firmware/efi/libstub/efistub.h84
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c11
-rw-r--r--drivers/firmware/efi/libstub/riscv-stub.c32
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c119
-rw-r--r--drivers/gpio/TODO19
-rw-r--r--drivers/gpio/gpio-mvebu.c10
-rw-r--r--drivers/gpio/gpio-pca953x.c4
-rw-r--r--drivers/gpio/gpio-pl061.c32
-rw-r--r--drivers/gpio/gpio-sim.c4
-rw-r--r--drivers/gpio/gpio-tegra186.c32
-rw-r--r--drivers/gpio/gpio-vf610.c8
-rw-r--r--drivers/gpio/gpio-visconti.c7
-rw-r--r--drivers/gpio/gpiolib-acpi.c22
-rw-r--r--drivers/gpio/gpiolib-of.c2
-rw-r--r--drivers/gpio/gpiolib.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ObjectID.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c127
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c17
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c83
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c24
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c107
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c67
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c5
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c56
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c10
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c35
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c10
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c2
-rw-r--r--drivers/gpu/drm/bridge/Kconfig1
-rw-r--r--drivers/gpu/drm/dp/drm_dp_mst_topology.c1
-rw-r--r--drivers/gpu/drm/drm_of.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c44
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c34
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c38
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c16
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.h2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h22
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c11
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c8
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c80
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c34
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c3
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c11
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h1
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c4
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_sync.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c3
-rw-r--r--drivers/gpu/drm/vc4/Kconfig3
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c43
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c26
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c5
-rw-r--r--drivers/hv/channel_mgmt.c6
-rw-r--r--drivers/hv/hv_balloon.c49
-rw-r--r--drivers/hv/hv_common.c11
-rw-r--r--drivers/hv/ring_buffer.c11
-rw-r--r--drivers/hv/vmbus_drv.c65
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/adt7470.c4
-rw-r--r--drivers/hwmon/asus_wmi_sensors.c2
-rw-r--r--drivers/hwmon/f71882fg.c5
-rw-r--r--drivers/hwmon/pmbus/delta-ahe50dc-fan.c16
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c3
-rw-r--r--drivers/hwmon/pmbus/xdpe12284.c2
-rw-r--r--drivers/hwmon/tmp401.c11
-rw-r--r--drivers/i2c/busses/i2c-imx.c33
-rw-r--r--drivers/i2c/busses/i2c-ismt.c18
-rw-r--r--drivers/i2c/busses/i2c-mt7621.c10
-rw-r--r--drivers/i2c/busses/i2c-pasemi-core.c6
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c6
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c1
-rw-r--r--drivers/i2c/i2c-dev.c17
-rw-r--r--drivers/idle/intel_idle.c27
-rw-r--r--drivers/iio/adc/ad7280a.c12
-rw-r--r--drivers/iio/chemical/scd4x.c5
-rw-r--r--drivers/iio/dac/ad3552r.c6
-rw-r--r--drivers/iio/dac/ad5446.c2
-rw-r--r--drivers/iio/dac/ad5592r-base.c2
-rw-r--r--drivers/iio/dac/ltc2688.c2
-rw-r--r--drivers/iio/dac/ti-dac5571.c28
-rw-r--r--drivers/iio/filter/Kconfig1
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c20
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c15
-rw-r--r--drivers/iio/magnetometer/ak8975.c1
-rw-r--r--drivers/iio/proximity/sx9324.c32
-rw-r--r--drivers/iio/proximity/sx_common.c1
-rw-r--r--drivers/infiniband/core/cm.c3
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c6
-rw-r--r--drivers/infiniband/hw/irdma/cm.c33
-rw-r--r--drivers/infiniband/hw/irdma/utils.c21
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mcast.c81
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c35
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c7
-rw-r--r--drivers/input/keyboard/cypress-sf.c14
-rw-r--r--drivers/input/keyboard/omap4-keypad.c2
-rw-r--r--drivers/input/touchscreen/ili210x.c20
-rw-r--r--drivers/interconnect/core.c8
-rw-r--r--drivers/interconnect/qcom/sc7180.c21
-rw-r--r--drivers/interconnect/qcom/sdx55.c21
-rw-r--r--drivers/iommu/apple-dart.c10
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c9
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c30
-rw-r--r--drivers/iommu/intel/iommu.c27
-rw-r--r--drivers/iommu/intel/svm.c4
-rw-r--r--drivers/iommu/iommu.c9
-rw-r--r--drivers/iommu/omap-iommu.c2
-rw-r--r--drivers/irqchip/Kconfig13
-rw-r--r--drivers/irqchip/Makefile6
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c56
-rw-r--r--drivers/irqchip/irq-aspeed-i2c-ic.c4
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c4
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c2
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c30
-rw-r--r--drivers/irqchip/irq-gic-v3.c270
-rw-r--r--drivers/irqchip/irq-gic.c12
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c16
-rw-r--r--drivers/irqchip/irq-qcom-mpm.c2
-rw-r--r--drivers/irqchip/irq-sni-exiu.c25
-rw-r--r--drivers/irqchip/irq-sun6i-r.c6
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c18
-rw-r--r--drivers/md/bcache/alloc.c2
-rw-r--r--drivers/md/bcache/debug.c10
-rw-r--r--drivers/md/bcache/journal.c2
-rw-r--r--drivers/md/bcache/request.c6
-rw-r--r--drivers/md/bcache/super.c3
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/dm-bufio.c9
-rw-r--r--drivers/md/dm-cache-target.c9
-rw-r--r--drivers/md/dm-clone-target.c9
-rw-r--r--drivers/md/dm-integrity.c7
-rw-r--r--drivers/md/dm-io.c2
-rw-r--r--drivers/md/dm-log-writes.c3
-rw-r--r--drivers/md/dm-ps-historical-service-time.c11
-rw-r--r--drivers/md/dm-raid.c9
-rw-r--r--drivers/md/dm-table.c25
-rw-r--r--drivers/md/dm-thin.c15
-rw-r--r--drivers/md/dm-zone.c49
-rw-r--r--drivers/md/dm-zoned-target.c2
-rw-r--r--drivers/md/dm.c21
-rw-r--r--drivers/md/md-bitmap.c45
-rw-r--r--drivers/md/md-cluster.c2
-rw-r--r--drivers/md/md-linear.c11
-rw-r--r--drivers/md/md.c67
-rw-r--r--drivers/md/md.h62
-rw-r--r--drivers/md/raid0.c38
-rw-r--r--drivers/md/raid1.c73
-rw-r--r--drivers/md/raid10.c81
-rw-r--r--drivers/md/raid5-cache.c8
-rw-r--r--drivers/md/raid5-ppl.c13
-rw-r--r--drivers/md/raid5.c241
-rw-r--r--drivers/md/raid5.h23
-rw-r--r--drivers/media/platform/nxp/Kconfig1
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c2
-rw-r--r--drivers/media/tuners/si2157.c22
-rw-r--r--drivers/memory/atmel-ebi.c23
-rw-r--r--drivers/memory/fsl_ifc.c3
-rw-r--r--drivers/memory/renesas-rpc-if.c70
-rw-r--r--drivers/message/fusion/mptbase.c4
-rw-r--r--drivers/misc/eeprom/at25.c19
-rw-r--r--drivers/misc/habanalabs/common/memory.c16
-rw-r--r--drivers/mmc/core/block.c48
-rw-r--r--drivers/mmc/core/core.c5
-rw-r--r--drivers/mmc/core/mmc.c23
-rw-r--r--drivers/mmc/core/mmc_ops.c2
-rw-r--r--drivers/mmc/core/mmc_test.c3
-rw-r--r--drivers/mmc/core/queue.c3
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c6
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c8
-rw-r--r--drivers/mmc/host/sdhci-msm.c42
-rw-r--r--drivers/mmc/host/sdhci-xenon.c10
-rw-r--r--drivers/mmc/host/sunxi-mmc.c5
-rw-r--r--drivers/mtd/mtd_blkdevs.c1
-rw-r--r--drivers/mtd/nand/raw/mtk_ecc.c12
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c24
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c14
-rw-r--r--drivers/net/bonding/bond_main.c13
-rw-r--r--drivers/net/can/grcan.c46
-rw-r--r--drivers/net/can/m_can/m_can.c24
-rw-r--r--drivers/net/can/m_can/m_can.h3
-rw-r--r--drivers/net/can/m_can/m_can_pci.c48
-rw-r--r--drivers/net/dsa/b53/b53_common.c36
-rw-r--r--drivers/net/dsa/b53/b53_priv.h24
-rw-r--r--drivers/net/dsa/b53/b53_serdes.c74
-rw-r--r--drivers/net/dsa/b53/b53_serdes.h9
-rw-r--r--drivers/net/dsa/b53/b53_srab.c4
-rw-r--r--drivers/net/dsa/bcm_sf2.c3
-rw-r--r--drivers/net/dsa/lantiq_gswip.c3
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c38
-rw-r--r--drivers/net/dsa/mt7530.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/port_hidden.c5
-rw-r--r--drivers/net/dsa/ocelot/felix.c30
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c2
-rw-r--r--drivers/net/dsa/realtek/Kconfig30
-rw-r--r--drivers/net/dsa/realtek/realtek-mdio.c1
-rw-r--r--drivers/net/dsa/realtek/realtek-smi.c9
-rw-r--r--drivers/net/ethernet/Kconfig26
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c20
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c24
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c7
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c15
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c10
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c10
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c5
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c15
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c8
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_dev.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c84
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c31
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c7
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c129
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h6
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c27
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c157
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c95
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c13
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c11
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c3
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_sgmii.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c131
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c2
-rw-r--r--drivers/net/ethernet/micrel/Kconfig2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mac.c6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c37
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c8
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c3
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c27
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c9
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c9
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c3
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c5
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c155
-rw-r--r--drivers/net/ethernet/sfc/ptp.c14
-rw-r--r--drivers/net/ethernet/sfc/ptp.h1
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c3
-rw-r--r--drivers/net/ethernet/sfc/tx.c3
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c3
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c5
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c33
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c30
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/ipa/gsi.c6
-rw-r--r--drivers/net/ipa/ipa_endpoint.c13
-rw-r--r--drivers/net/ipa/ipa_qmi.c2
-rw-r--r--drivers/net/macvlan.c8
-rw-r--r--drivers/net/mctp/mctp-i2c.c2
-rw-r--r--drivers/net/mdio/fwnode_mdio.c5
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c6
-rw-r--r--drivers/net/mdio/mdio-mux-bcm6368.c2
-rw-r--r--drivers/net/phy/marvell10g.c2
-rw-r--r--drivers/net/phy/micrel.c116
-rw-r--r--drivers/net/phy/microchip_t1.c8
-rw-r--r--drivers/net/phy/phy.c7
-rw-r--r--drivers/net/phy/sfp.c12
-rw-r--r--drivers/net/ppp/pppoe.c1
-rw-r--r--drivers/net/slip/slip.c2
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/aqc111.c9
-rw-r--r--drivers/net/veth.c2
-rw-r--r--drivers/net/virtio_net.c20
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c6
-rw-r--r--drivers/net/vrf.c15
-rw-r--r--drivers/net/vxlan/vxlan_core.c4
-rw-r--r--drivers/net/wan/cosa.c2
-rw-r--r--drivers/net/wireguard/device.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h13
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c93
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c43
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c33
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c2
-rw-r--r--drivers/nfc/nfcmrvl/main.c2
-rw-r--r--drivers/nfc/pn533/pn533.c5
-rw-r--r--drivers/nvme/host/constants.c5
-rw-r--r--drivers/nvme/host/core.c138
-rw-r--r--drivers/nvme/host/fabrics.h8
-rw-r--r--drivers/nvme/host/fc.c26
-rw-r--r--drivers/nvme/host/ioctl.c278
-rw-r--r--drivers/nvme/host/multipath.c1
-rw-r--r--drivers/nvme/host/nvme.h11
-rw-r--r--drivers/nvme/host/pci.c14
-rw-r--r--drivers/nvme/host/rdma.c5
-rw-r--r--drivers/nvme/host/tcp.c5
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c2
-rw-r--r--drivers/nvme/target/zns.c3
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c7
-rw-r--r--drivers/pci/controller/pci-aardvark.c48
-rw-r--r--drivers/pci/controller/pci-hyperv.c9
-rw-r--r--drivers/pci/pci.c10
-rw-r--r--drivers/perf/Kconfig2
-rw-r--r--drivers/perf/arm_pmu.c10
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c2
-rw-r--r--drivers/perf/qcom_l2_pmu.c6
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c20
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c3
-rw-r--r--drivers/phy/samsung/phy-exynos5250-sata.c21
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c2
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c2
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c1
-rw-r--r--drivers/phy/ti/phy-tusb1210.c12
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c17
-rw-r--r--drivers/pinctrl/intel/pinctrl-alderlake.c60
-rw-r--r--drivers/pinctrl/mediatek/Kconfig1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8365.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c11
-rw-r--r--drivers/pinctrl/pinctrl-apple-gpio.c29
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c4
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c6
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c69
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c53
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6350.c16
-rw-r--r--drivers/pinctrl/samsung/Kconfig11
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c2
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c23
-rw-r--r--drivers/pinctrl/sunplus/sppctl_sp7021.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c4
-rw-r--r--drivers/platform/surface/aggregator/core.c2
-rw-r--r--drivers/platform/surface/surface_gpe.c8
-rw-r--r--drivers/platform/x86/acerhdf.c21
-rw-r--r--drivers/platform/x86/amd-pmc.c14
-rw-r--r--drivers/platform/x86/asus-wmi.c15
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c1
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c13
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c1
-rw-r--r--drivers/platform/x86/intel/pmc/core.h2
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.c2
-rw-r--r--drivers/platform/x86/intel/sdsi.c44
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c3
-rw-r--r--drivers/platform/x86/samsung-laptop.c2
-rw-r--r--drivers/platform/x86/think-lmi.c44
-rw-r--r--drivers/platform/x86/think-lmi.h1
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c175
-rw-r--r--drivers/power/supply/power_supply_core.c6
-rw-r--r--drivers/power/supply/samsung-sdi-battery.c2
-rw-r--r--drivers/ptp/ptp_ocp.c64
-rw-r--r--drivers/regulator/atc260x-regulator.c1
-rw-r--r--drivers/regulator/rtq2134-regulator.c1
-rw-r--r--drivers/regulator/wm8994-regulator.c42
-rw-r--r--drivers/reset/reset-rzg2l-usbphy-ctrl.c4
-rw-r--r--drivers/reset/tegra/reset-bpmp.c9
-rw-r--r--drivers/rtc/rtc-sun6i.c17
-rw-r--r--drivers/s390/block/dasd.c18
-rw-r--r--drivers/s390/block/dasd_eckd.c33
-rw-r--r--drivers/s390/block/dasd_fba.c2
-rw-r--r--drivers/s390/block/dasd_int.h14
-rw-r--r--drivers/s390/net/ctcm_mpc.c6
-rw-r--r--drivers/s390/net/ctcm_sysfs.c5
-rw-r--r--drivers/s390/net/lcs.c7
-rw-r--r--drivers/scsi/aha152x.c235
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.h2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.c6
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.h2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_pci.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c6
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c6
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c6
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c1
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c2
-rw-r--r--drivers/scsi/isci/host.c6
-rw-r--r--drivers/scsi/libiscsi.c28
-rw-r--r--drivers/scsi/libiscsi_tcp.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c120
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c88
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c27
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c81
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h3
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c9
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c5
-rw-r--r--drivers/scsi/mvsas/mv_init.c1
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c52
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c33
-rw-r--r--drivers/scsi/pmcraid.c491
-rw-r--r--drivers/scsi/pmcraid.h33
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c69
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c3
-rw-r--r--drivers/scsi/scsi_debug.c205
-rw-r--r--drivers/scsi/scsi_logging.c2
-rw-r--r--drivers/scsi/scsi_scan.c5
-rw-r--r--drivers/scsi/scsi_sysfs.c4
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c239
-rw-r--r--drivers/scsi/sd.c5
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/scsi/sr_ioctl.c15
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c15
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c17
-rw-r--r--drivers/scsi/ufs/ufshcd.h2
-rw-r--r--drivers/scsi/ufs/ufshpb.c30
-rw-r--r--drivers/scsi/virtio_scsi.c8
-rw-r--r--drivers/scsi/zorro7xx.c2
-rw-r--r--drivers/slimbus/qcom-ctrl.c4
-rw-r--r--drivers/soc/imx/imx8m-blk-ctrl.c2
-rw-r--r--drivers/spi/atmel-quadspi.c3
-rw-r--r--drivers/spi/spi-bcm-qspi.c4
-rw-r--r--drivers/spi/spi-cadence-quadspi.c65
-rw-r--r--drivers/spi/spi-intel-pci.c1
-rw-r--r--drivers/spi/spi-mtk-nor.c12
-rw-r--r--drivers/spi/spi-mxic.c1
-rw-r--r--drivers/spi/spi-rpc-if.c8
-rw-r--r--drivers/spi/spi.c7
-rw-r--r--drivers/staging/r8188eu/core/rtw_br_ext.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c32
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c24
-rw-r--r--drivers/target/target_core_device.c20
-rw-r--r--drivers/target/target_core_file.c10
-rw-r--r--drivers/target/target_core_iblock.c17
-rw-r--r--drivers/target/target_core_pscsi.c46
-rw-r--r--drivers/target/target_core_user.c3
-rw-r--r--drivers/tee/optee/ffa_abi.c1
-rw-r--r--drivers/thermal/Kconfig6
-rw-r--r--drivers/thermal/gov_user_space.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c52
-rw-r--r--drivers/thermal/thermal_sysfs.c3
-rw-r--r--drivers/tty/n_gsm.c495
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c29
-rw-r--r--drivers/tty/serial/8250/8250_pci.c8
-rw-r--r--drivers/tty/serial/8250/8250_port.c6
-rw-r--r--drivers/tty/serial/amba-pl011.c9
-rw-r--r--drivers/tty/serial/digicolor-usart.c5
-rw-r--r--drivers/tty/serial/fsl_lpuart.c18
-rw-r--r--drivers/tty/serial/imx.c2
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c20
-rw-r--r--drivers/tty/serial/sc16is7xx.c6
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c7
-rw-r--r--drivers/usb/class/cdc-wdm.c1
-rw-r--r--drivers/usb/core/devio.c14
-rw-r--r--drivers/usb/core/quirks.c6
-rw-r--r--drivers/usb/dwc3/core.c34
-rw-r--r--drivers/usb/dwc3/drd.c11
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c8
-rw-r--r--drivers/usb/dwc3/gadget.c31
-rw-r--r--drivers/usb/gadget/configfs.c2
-rw-r--r--drivers/usb/gadget/function/f_uvc.c25
-rw-r--r--drivers/usb/gadget/function/uvc.h2
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c2
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c3
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c23
-rw-r--r--drivers/usb/host/ehci-pci.c4
-rw-r--r--drivers/usb/host/ehci.h1
-rw-r--r--drivers/usb/host/xhci-hub.c2
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c90
-rw-r--r--drivers/usb/host/xhci-mtk.h2
-rw-r--r--drivers/usb/host/xhci-pci.c4
-rw-r--r--drivers/usb/host/xhci-ring.c1
-rw-r--r--drivers/usb/host/xhci-tegra.c4
-rw-r--r--drivers/usb/host/xhci.c11
-rw-r--r--drivers/usb/misc/qcom_eud.c10
-rw-r--r--drivers/usb/misc/uss720.c3
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c6
-rw-r--r--drivers/usb/phy/phy-generic.c7
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/option.c16
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h1
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/serial/whiteheat.c5
-rw-r--r--drivers/usb/typec/Kconfig1
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6360.c26
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c24
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c123
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c124
-rw-r--r--drivers/vhost/net.c15
-rw-r--r--drivers/video/fbdev/arkfb.c3
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c1
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c1
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c1
-rw-r--r--drivers/video/fbdev/aty/radeonfb.h2
-rw-r--r--drivers/video/fbdev/clps711x-fb.c3
-rw-r--r--drivers/video/fbdev/controlfb.c3
-rw-r--r--drivers/video/fbdev/core/fbmem.c9
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c4
-rw-r--r--drivers/video/fbdev/efifb.c9
-rw-r--r--drivers/video/fbdev/i740fb.c5
-rw-r--r--drivers/video/fbdev/imxfb.c2
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.h1
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/fbdev/mmp/core.c11
-rw-r--r--drivers/video/fbdev/neofb.c2
-rw-r--r--drivers/video/fbdev/omap/hwa742.c6
-rw-r--r--drivers/video/fbdev/omap/lcdc.c6
-rw-r--r--drivers/video/fbdev/omap/sossi.c5
-rw-r--r--drivers/video/fbdev/platinumfb.c2
-rw-r--r--drivers/video/fbdev/pm2fb.c8
-rw-r--r--drivers/video/fbdev/pxafb.c4
-rw-r--r--drivers/video/fbdev/s3fb.c3
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c3
-rw-r--r--drivers/video/fbdev/simplefb.c8
-rw-r--r--drivers/video/fbdev/sis/sis_main.c2
-rw-r--r--drivers/video/fbdev/tridentfb.c3
-rw-r--r--drivers/video/fbdev/udlfb.c14
-rw-r--r--drivers/video/fbdev/valkyriefb.c3
-rw-r--r--drivers/video/fbdev/vesafb.c8
-rw-r--r--drivers/video/fbdev/vt8623fb.c3
-rw-r--r--drivers/video/of_display_timing.c2
-rw-r--r--drivers/virt/Kconfig2
-rw-r--r--drivers/virt/Makefile1
-rw-r--r--drivers/virt/coco/efi_secret/Kconfig16
-rw-r--r--drivers/virt/coco/efi_secret/Makefile2
-rw-r--r--drivers/virt/coco/efi_secret/efi_secret.c349
-rw-r--r--drivers/virtio/virtio.c5
-rw-r--r--drivers/xen/balloon.c54
-rw-r--r--drivers/xen/gntalloc.c4
-rw-r--r--drivers/xen/unpopulated-alloc.c33
807 files changed, 10068 insertions, 6610 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 32b20efff5f8..eb95e188d62b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -96,11 +96,6 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
(void *)1},
- /* T40 can not handle C3 idle state */
- { set_max_cstate, "IBM ThinkPad T40", {
- DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
- DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
- (void *)2},
{},
};
@@ -570,8 +565,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
- if (cx->type == ACPI_STATE_C3)
- ACPI_FLUSH_CPU_CACHE();
+ ACPI_FLUSH_CPU_CACHE();
while (1) {
@@ -796,7 +790,8 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
cx->type == ACPI_STATE_C3) {
state->enter_dead = acpi_idle_play_dead;
- drv->safe_state_index = count;
+ if (cx->type != ACPI_STATE_C3)
+ drv->safe_state_index = count;
}
/*
* Halt-induced C1 is not good for ->enter_s2idle, because it
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 9efbfe087de7..762b61f67e6c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -588,19 +588,6 @@ static struct acpi_device *handle_to_device(acpi_handle handle,
return adev;
}
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
-{
- if (!device)
- return -EINVAL;
-
- *device = handle_to_device(handle, NULL);
- if (!*device)
- return -ENODEV;
-
- return 0;
-}
-EXPORT_SYMBOL(acpi_bus_get_device);
-
/**
* acpi_fetch_acpi_dev - Retrieve ACPI device object.
* @handle: ACPI handle associated with the requested ACPI device object.
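The removal above drops acpi_bus_get_device() in favor of acpi_fetch_acpi_dev(), which returns the device pointer directly (or NULL) instead of filling an out-parameter. A minimal migration sketch, assuming a caller that only needs to distinguish "present" from "absent":

        struct acpi_device *adev;

        /* old: if (acpi_bus_get_device(handle, &adev)) return -ENODEV; */
        adev = acpi_fetch_acpi_dev(handle);
        if (!adev)
                return -ENODEV;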
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8351c5638880..f3b639e89dd8 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2295,6 +2295,7 @@ static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
{
int ret = 0;
struct binder_sg_copy *sgc, *tmpsgc;
+ struct binder_ptr_fixup *tmppf;
struct binder_ptr_fixup *pf =
list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
node);
@@ -2349,7 +2350,11 @@ static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
list_del(&sgc->node);
kfree(sgc);
}
- BUG_ON(!list_empty(pf_head));
+ list_for_each_entry_safe(pf, tmppf, pf_head, node) {
+ BUG_ON(pf->skip_size == 0);
+ list_del(&pf->node);
+ kfree(pf);
+ }
BUG_ON(!list_empty(sgc_head));
return ret > 0 ? -EINVAL : ret;
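The hunk above replaces a hard BUG_ON(!list_empty(pf_head)) with an explicit teardown of any leftover fixups. The pattern is the kernel's standard safe-iteration delete idiom; a minimal self-contained sketch with a hypothetical node type (not binder's actual structures):

        #include <linux/list.h>
        #include <linux/slab.h>

        struct foo {
                struct list_head node;
        };

        static void free_all(struct list_head *head)
        {
                struct foo *f, *tmp;

                /* The _safe variant caches the next entry, so deleting
                 * and freeing the current one cannot break the walk. */
                list_for_each_entry_safe(f, tmp, head, node) {
                        list_del(&f->node);
                        kfree(f);
                }
        }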
@@ -2486,6 +2491,9 @@ static int binder_translate_fd_array(struct list_head *pf_head,
struct binder_proc *proc = thread->proc;
int ret;
+ if (fda->num_fds == 0)
+ return 0;
+
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index e5641e6c52ee..bb45a9c00514 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -115,14 +115,16 @@ config SATA_AHCI
If unsure, say N.
-config SATA_LPM_POLICY
+config SATA_MOBILE_LPM_POLICY
int "Default SATA Link Power Management policy for low power chipsets"
range 0 4
default 0
depends on SATA_AHCI
help
Select the Default SATA Link Power Management (LPM) policy to use
- for chipsets / "South Bridges" designated as supporting low power.
+ for chipsets / "South Bridges" supporting low-power modes. Such
+ chipsets are typically found on most laptops but desktops and
+ servers now also widely use chipsets supporting low power modes.
The value set has the following meanings:
0 => Keep firmware settings
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 84456c05e845..c1eca72b4575 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -324,7 +324,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
{ PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
- { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG/Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
{ PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
{ PCI_VDEVICE(INTEL, 0x1e03), board_ahci_low_power }, /* Panther M AHCI */
@@ -367,7 +366,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg/Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg/Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* *burg SATA0 'RAID' */
+ { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* *burg SATA1 'RAID' */
+ { PCI_VDEVICE(INTEL, 0x282f), board_ahci }, /* *burg SATA2 'RAID' */
{ PCI_VDEVICE(INTEL, 0x43d4), board_ahci }, /* Rocket Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x43d5), board_ahci }, /* Rocket Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x43d6), board_ahci }, /* Rocket Lake PCH-H RAID */
@@ -1595,7 +1596,7 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
static void ahci_update_initial_lpm_policy(struct ata_port *ap,
struct ahci_host_priv *hpriv)
{
- int policy = CONFIG_SATA_LPM_POLICY;
+ int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
/* Ignore processing for chipsets that don't use policy */
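One practical consequence of the rename: a .config that carried the old symbol must switch to the new name for the policy to stick, e.g.

        CONFIG_SATA_MOBILE_LPM_POLICY=3

(3 selects medium power with device-initiated PM per the help text's 0-4 scale; treating it as a typical distro default is an assumption, not something this diff states.)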
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 6ead58c1b6e5..ad11a4c52fbe 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -236,7 +236,7 @@ enum {
AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read
only registers */
AHCI_HFLAG_USE_LPM_POLICY = (1 << 25), /* chipset that should use
- SATA_LPM_POLICY
+ SATA_MOBILE_LPM_POLICY
as default lpm_policy */
AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during
suspend/resume */
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index ab8552b1ff2a..f61795c546cf 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -549,15 +549,10 @@ static int brcm_ahci_remove(struct platform_device *pdev)
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
- int ret;
brcm_sata_phys_disable(priv);
- ret = ata_platform_remove_one(pdev);
- if (ret)
- return ret;
-
- return 0;
+ return ata_platform_remove_one(pdev);
}
static void brcm_ahci_shutdown(struct platform_device *pdev)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index cceedde51126..40e816419f48 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -96,7 +96,8 @@ struct ata_force_param {
unsigned long xfer_mask;
unsigned int horkage_on;
unsigned int horkage_off;
- u16 lflags;
+ u16 lflags_on;
+ u16 lflags_off;
};
struct ata_force_ent {
@@ -386,11 +387,17 @@ static void ata_force_link_limits(struct ata_link *link)
}
/* let lflags stack */
- if (fe->param.lflags) {
- link->flags |= fe->param.lflags;
+ if (fe->param.lflags_on) {
+ link->flags |= fe->param.lflags_on;
ata_link_notice(link,
"FORCE: link flag 0x%x forced -> 0x%x\n",
- fe->param.lflags, link->flags);
+ fe->param.lflags_on, link->flags);
+ }
+ if (fe->param.lflags_off) {
+ link->flags &= ~fe->param.lflags_off;
+ ata_link_notice(link,
+ "FORCE: link flag 0x%x cleared -> 0x%x\n",
+ fe->param.lflags_off, link->flags);
}
}
}
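With lflags split into on/off halves, the libata.force= kernel parameter can now clear a link flag as well as set one. A hedged usage sketch (the port IDs are illustrative):

        libata.force=1:nohrst,2:dbdelay

Here "nohrst" sets ATA_LFLAG_NO_HRST on port 1, while "dbdelay" on port 2 lands in lflags_off and clears ATA_LFLAG_NO_DEBOUNCE_DELAY (restoring the debounce delay), matching the force_lflag_on()/force_lflag_onoff() table entries added further down in this patch.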
@@ -898,7 +905,7 @@ EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
* RETURNS:
* Matching xfer_shift, -1 if no match found.
*/
-int ata_xfer_mode2shift(unsigned long xfer_mode)
+int ata_xfer_mode2shift(u8 xfer_mode)
{
const struct ata_xfer_ent *ent;
@@ -1398,7 +1405,7 @@ unsigned long ata_id_xfermask(const u16 *id)
/* But wait.. there's more. Design your standards by
* committee and you too can get a free iordy field to
- * process. However its the speeds not the modes that
+ * process. However it is the speeds not the modes that
* are supported... Note drivers using the timing API
* will get this right anyway
*/
@@ -3898,7 +3905,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* Devices where NCQ should be avoided */
/* NCQ is slow */
{ "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
- { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
+ { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ },
/* http://thread.gmane.org/gmane.linux.ide/14907 */
{ "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
/* NCQ is broken */
@@ -3924,23 +3931,23 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* drives which fail FPDMA_AA activation (some may freeze afterwards)
the ST disks also have LPM issues */
{ "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA |
- ATA_HORKAGE_NOLPM, },
+ ATA_HORKAGE_NOLPM },
{ "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
- { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
- { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
- { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
+ { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ },
+ { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ },
+ { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ },
/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
- { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
+ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ },
/* Sandisk SD7/8/9s lock up hard on large trims */
- { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, },
+ { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M },
/* devices which puke on READ_NATIVE_MAX */
- { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
+ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA },
{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
{ "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
@@ -3949,22 +3956,22 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
/* Devices which report 1 sector over size HPA */
- { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
- { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
- { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
+ { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE },
+ { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE },
+ { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE },
/* Devices which get the IVB wrong */
- { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
+ { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
/* Maybe we should just blacklist TSSTcorp... */
- { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
+ { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB },
/* Devices that do not need bridging limits applied */
- { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
- { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
+ { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK },
+ { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK },
/* Devices which aren't very happy with higher link speeds */
- { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
- { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
+ { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS },
+ { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS },
/*
* Devices which choke on SETXFER. Applies only if both the
@@ -3982,54 +3989,57 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
{ "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM, },
+ ATA_HORKAGE_NOLPM },
/* 512GB MX100 with newer firmware has only LPM issues */
{ "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM, },
+ ATA_HORKAGE_NOLPM },
/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
{ "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM, },
+ ATA_HORKAGE_NOLPM },
{ "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM, },
+ ATA_HORKAGE_NOLPM },
/* These specific Samsung models/firmware-revs do not handle LPM well */
- { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
- { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
- { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
- { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
+ { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
+ { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM },
+ { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM },
+ { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_NO_DMA_LOG |
+ ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NO_NCQ_ON_ATI, },
+ ATA_HORKAGE_NO_NCQ_ON_ATI },
{ "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NO_NCQ_ON_ATI, },
+ ATA_HORKAGE_NO_NCQ_ON_ATI },
{ "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ ATA_HORKAGE_ZERO_AFTER_TRIM },
/* devices that don't properly handle TRIM commands */
- { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
- { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, },
+ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM },
+ { "M88V29*", NULL, ATA_HORKAGE_NOTRIM },
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4047,16 +4057,16 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
* The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
* that model before whitelisting all other intel SSDs.
*/
- { "INTEL*SSDSC2MH*", NULL, 0, },
+ { "INTEL*SSDSC2MH*", NULL, 0 },
- { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
/*
* Some WD SATA-I drives spin up and down erratically when the link
@@ -4564,42 +4574,6 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
}
/**
- * ata_qc_new_init - Request an available ATA command, and initialize it
- * @dev: Device from whom we request an available command structure
- * @tag: tag
- *
- * LOCKING:
- * None.
- */
-
-struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
-{
- struct ata_port *ap = dev->link->ap;
- struct ata_queued_cmd *qc;
-
- /* no command while frozen */
- if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
- return NULL;
-
- /* libsas case */
- if (ap->flags & ATA_FLAG_SAS_HOST) {
- tag = ata_sas_allocate_tag(ap);
- if (tag < 0)
- return NULL;
- }
-
- qc = __ata_qc_from_tag(ap, tag);
- qc->tag = qc->hw_tag = tag;
- qc->scsicmd = NULL;
- qc->ap = ap;
- qc->dev = dev;
-
- ata_qc_reinit(qc);
-
- return qc;
-}
-
-/**
* ata_qc_free - free unused ata_queued_cmd
* @qc: Command to complete
*
@@ -4611,19 +4585,9 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
*/
void ata_qc_free(struct ata_queued_cmd *qc)
{
- struct ata_port *ap;
- unsigned int tag;
-
- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
- ap = qc->ap;
-
qc->flags = 0;
- tag = qc->tag;
- if (ata_tag_valid(tag)) {
+ if (ata_tag_valid(qc->tag))
qc->tag = ATA_TAG_POISON;
- if (ap->flags & ATA_FLAG_SAS_HOST)
- ata_sas_free_tag(tag, ap);
- }
}
void __ata_qc_complete(struct ata_queued_cmd *qc)
@@ -5602,7 +5566,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
* Start and then freeze ports of @host. Started status is
* recorded in host->flags, so this function can be called
* multiple times. Ports are guaranteed to get started only
- * once. If host->ops isn't initialized yet, its set to the
+ * once. If host->ops is not initialized yet, it is set to the
* first non-dummy port ops.
*
* LOCKING:
@@ -6143,67 +6107,113 @@ int ata_platform_remove_one(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(ata_platform_remove_one);
#ifdef CONFIG_ATA_FORCE
+
+#define force_cbl(name, flag) \
+ { #name, .cbl = (flag) }
+
+#define force_spd_limit(spd, val) \
+ { #spd, .spd_limit = (val) }
+
+#define force_xfer(mode, shift) \
+ { #mode, .xfer_mask = (1UL << (shift)) }
+
+#define force_lflag_on(name, flags) \
+ { #name, .lflags_on = (flags) }
+
+#define force_lflag_onoff(name, flags) \
+ { "no" #name, .lflags_on = (flags) }, \
+ { #name, .lflags_off = (flags) }
+
+#define force_horkage_on(name, flag) \
+ { #name, .horkage_on = (flag) }
+
+#define force_horkage_onoff(name, flag) \
+ { "no" #name, .horkage_on = (flag) }, \
+ { #name, .horkage_off = (flag) }
+
+static const struct ata_force_param force_tbl[] __initconst = {
+ force_cbl(40c, ATA_CBL_PATA40),
+ force_cbl(80c, ATA_CBL_PATA80),
+ force_cbl(short40c, ATA_CBL_PATA40_SHORT),
+ force_cbl(unk, ATA_CBL_PATA_UNK),
+ force_cbl(ign, ATA_CBL_PATA_IGN),
+ force_cbl(sata, ATA_CBL_SATA),
+
+ force_spd_limit(1.5Gbps, 1),
+ force_spd_limit(3.0Gbps, 2),
+
+ force_xfer(pio0, ATA_SHIFT_PIO + 0),
+ force_xfer(pio1, ATA_SHIFT_PIO + 1),
+ force_xfer(pio2, ATA_SHIFT_PIO + 2),
+ force_xfer(pio3, ATA_SHIFT_PIO + 3),
+ force_xfer(pio4, ATA_SHIFT_PIO + 4),
+ force_xfer(pio5, ATA_SHIFT_PIO + 5),
+ force_xfer(pio6, ATA_SHIFT_PIO + 6),
+ force_xfer(mwdma0, ATA_SHIFT_MWDMA + 0),
+ force_xfer(mwdma1, ATA_SHIFT_MWDMA + 1),
+ force_xfer(mwdma2, ATA_SHIFT_MWDMA + 2),
+ force_xfer(mwdma3, ATA_SHIFT_MWDMA + 3),
+ force_xfer(mwdma4, ATA_SHIFT_MWDMA + 4),
+ force_xfer(udma0, ATA_SHIFT_UDMA + 0),
+ force_xfer(udma16, ATA_SHIFT_UDMA + 0),
+ force_xfer(udma/16, ATA_SHIFT_UDMA + 0),
+ force_xfer(udma1, ATA_SHIFT_UDMA + 1),
+ force_xfer(udma25, ATA_SHIFT_UDMA + 1),
+ force_xfer(udma/25, ATA_SHIFT_UDMA + 1),
+ force_xfer(udma2, ATA_SHIFT_UDMA + 2),
+ force_xfer(udma33, ATA_SHIFT_UDMA + 2),
+ force_xfer(udma/33, ATA_SHIFT_UDMA + 2),
+ force_xfer(udma3, ATA_SHIFT_UDMA + 3),
+ force_xfer(udma44, ATA_SHIFT_UDMA + 3),
+ force_xfer(udma/44, ATA_SHIFT_UDMA + 3),
+ force_xfer(udma4, ATA_SHIFT_UDMA + 4),
+ force_xfer(udma66, ATA_SHIFT_UDMA + 4),
+ force_xfer(udma/66, ATA_SHIFT_UDMA + 4),
+ force_xfer(udma5, ATA_SHIFT_UDMA + 5),
+ force_xfer(udma100, ATA_SHIFT_UDMA + 5),
+ force_xfer(udma/100, ATA_SHIFT_UDMA + 5),
+ force_xfer(udma6, ATA_SHIFT_UDMA + 6),
+ force_xfer(udma133, ATA_SHIFT_UDMA + 6),
+ force_xfer(udma/133, ATA_SHIFT_UDMA + 6),
+ force_xfer(udma7, ATA_SHIFT_UDMA + 7),
+
+ force_lflag_on(nohrst, ATA_LFLAG_NO_HRST),
+ force_lflag_on(nosrst, ATA_LFLAG_NO_SRST),
+ force_lflag_on(norst, ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST),
+ force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE),
+ force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY),
+
+ force_horkage_onoff(ncq, ATA_HORKAGE_NONCQ),
+ force_horkage_onoff(ncqtrim, ATA_HORKAGE_NO_NCQ_TRIM),
+ force_horkage_onoff(ncqati, ATA_HORKAGE_NO_NCQ_ON_ATI),
+
+ force_horkage_onoff(trim, ATA_HORKAGE_NOTRIM),
+ force_horkage_on(trim_zero, ATA_HORKAGE_ZERO_AFTER_TRIM),
+ force_horkage_on(max_trim_128m, ATA_HORKAGE_MAX_TRIM_128M),
+
+ force_horkage_onoff(dma, ATA_HORKAGE_NODMA),
+ force_horkage_on(atapi_dmadir, ATA_HORKAGE_ATAPI_DMADIR),
+ force_horkage_on(atapi_mod16_dma, ATA_HORKAGE_ATAPI_MOD16_DMA),
+
+ force_horkage_onoff(dmalog, ATA_HORKAGE_NO_DMA_LOG),
+ force_horkage_onoff(iddevlog, ATA_HORKAGE_NO_ID_DEV_LOG),
+ force_horkage_onoff(logdir, ATA_HORKAGE_NO_LOG_DIR),
+
+ force_horkage_on(max_sec_128, ATA_HORKAGE_MAX_SEC_128),
+ force_horkage_on(max_sec_1024, ATA_HORKAGE_MAX_SEC_1024),
+ force_horkage_on(max_sec_lba48, ATA_HORKAGE_MAX_SEC_LBA48),
+
+ force_horkage_onoff(lpm, ATA_HORKAGE_NOLPM),
+ force_horkage_onoff(setxfer, ATA_HORKAGE_NOSETXFER),
+ force_horkage_on(dump_id, ATA_HORKAGE_DUMP_ID),
+
+ force_horkage_on(disable, ATA_HORKAGE_DISABLE),
+};
+
static int __init ata_parse_force_one(char **cur,
struct ata_force_ent *force_ent,
const char **reason)
{
- static const struct ata_force_param force_tbl[] __initconst = {
- { "40c", .cbl = ATA_CBL_PATA40 },
- { "80c", .cbl = ATA_CBL_PATA80 },
- { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
- { "unk", .cbl = ATA_CBL_PATA_UNK },
- { "ign", .cbl = ATA_CBL_PATA_IGN },
- { "sata", .cbl = ATA_CBL_SATA },
- { "1.5Gbps", .spd_limit = 1 },
- { "3.0Gbps", .spd_limit = 2 },
- { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
- { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
- { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM },
- { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM },
- { "noncqati", .horkage_on = ATA_HORKAGE_NO_NCQ_ON_ATI },
- { "ncqati", .horkage_off = ATA_HORKAGE_NO_NCQ_ON_ATI },
- { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
- { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
- { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
- { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
- { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
- { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
- { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
- { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
- { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
- { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
- { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
- { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
- { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
- { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
- { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
- { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
- { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
- { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
- { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
- { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
- { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
- { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
- { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
- { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
- { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
- { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
- { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
- { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
- { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
- { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
- { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
- { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
- { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
- { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
- { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
- { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
- { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
- { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
- { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
- { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
- { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
- };
char *start = *cur, *p = *cur;
char *id, *val, *endp;
const struct ata_force_param *match_fp = NULL;
@@ -6285,7 +6295,7 @@ static void __init ata_parse_force_param(void)
int last_port = -1, last_device = -1;
char *p, *cur, *next;
- /* calculate maximum number of params and allocate force_tbl */
+ /* Calculate maximum number of params and allocate ata_force_tbl */
for (p = ata_force_param_buf; *p; p++)
if (*p == ',')
size++;
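For context, the force_tbl entries above are matched against the libata.force= kernel parameter. Per Documentation/admin-guide/kernel-parameters.txt the value is a comma-separated list of "[ID:]VAL", where ID is PORT[.DEVICE] and an omitted ID applies the setting to all ports and devices. Two illustrative invocations (the port/device numbers are examples only):

	libata.force=3.0Gbps               # cap every link at 3.0 Gbps
	libata.force=1:noncq,2.00:disable  # no NCQ on port 1, disable device 2.00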
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 044a16daa2d4..7a5fe41aa5ae 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -1268,31 +1268,6 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
}
EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
-int ata_sas_allocate_tag(struct ata_port *ap)
-{
- unsigned int max_queue = ap->host->n_tags;
- unsigned int i, tag;
-
- for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) {
- tag = tag < max_queue ? tag : 0;
-
- /* the last tag is reserved for internal command. */
- if (ata_tag_internal(tag))
- continue;
-
- if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) {
- ap->sas_last_tag = tag;
- return tag;
- }
- }
- return -1;
-}
-
-void ata_sas_free_tag(unsigned int tag, struct ata_port *ap)
-{
- clear_bit(tag, &ap->sas_tag_allocated);
-}
-
/**
* sata_async_notification - SATA async notification handler
* @ap: ATA port where async notification is received
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 06c9d90238d9..42cecf95a4e5 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -638,24 +638,48 @@ EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
struct scsi_cmnd *cmd)
{
+ struct ata_port *ap = dev->link->ap;
struct ata_queued_cmd *qc;
+ int tag;
- qc = ata_qc_new_init(dev, scsi_cmd_to_rq(cmd)->tag);
- if (qc) {
- qc->scsicmd = cmd;
- qc->scsidone = scsi_done;
-
- qc->sg = scsi_sglist(cmd);
- qc->n_elem = scsi_sg_count(cmd);
+ if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
+ goto fail;
- if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET)
- qc->flags |= ATA_QCFLAG_QUIET;
+ if (ap->flags & ATA_FLAG_SAS_HOST) {
+ /*
+ * SAS hosts may queue > ATA_MAX_QUEUE commands, so use
+ * the unique per-device budget token as the tag.
+ */
+ if (WARN_ON_ONCE(cmd->budget_token >= ATA_MAX_QUEUE))
+ goto fail;
+ tag = cmd->budget_token;
} else {
- cmd->result = (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
- scsi_done(cmd);
+ tag = scsi_cmd_to_rq(cmd)->tag;
}
+ qc = __ata_qc_from_tag(ap, tag);
+ qc->tag = qc->hw_tag = tag;
+ qc->ap = ap;
+ qc->dev = dev;
+
+ ata_qc_reinit(qc);
+
+ qc->scsicmd = cmd;
+ qc->scsidone = scsi_done;
+
+ qc->sg = scsi_sglist(cmd);
+ qc->n_elem = scsi_sg_count(cmd);
+
+ if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET)
+ qc->flags |= ATA_QCFLAG_QUIET;
+
return qc;
+
+fail:
+ set_host_byte(cmd, DID_OK);
+ set_status_byte(cmd, SAM_STAT_TASK_SET_FULL);
+ scsi_done(cmd);
+ return NULL;
}
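For reference, the lookup this rewrite leans on is a plain array index: __ata_qc_from_tag() has no allocation step of its own, so the uniqueness of the block-layer tag (or of the SAS budget token) is the whole allocation story. A sketch of the contemporaneous helper from include/linux/libata.h:

	static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
							       unsigned int tag)
	{
		if (ata_tag_valid(tag))
			return &ap->qcmd[tag];
		return NULL;
	}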
static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index b3be7a8f5bea..b1666adc1c3a 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1634,7 +1634,7 @@ EXPORT_SYMBOL_GPL(ata_sff_interrupt);
void ata_sff_lost_interrupt(struct ata_port *ap)
{
- u8 status;
+ u8 status = 0;
struct ata_queued_cmd *qc;
/* Only one outstanding command per SFF channel */
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index c9c2496d91ea..926a7f41303d 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -44,7 +44,6 @@ static inline void ata_force_cbl(struct ata_port *ap) { }
#endif
extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
-extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
unsigned int tag, int class);
@@ -91,18 +90,6 @@ extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
#define to_ata_port(d) container_of(d, struct ata_port, tdev)
-/* libata-sata.c */
-#ifdef CONFIG_SATA_HOST
-int ata_sas_allocate_tag(struct ata_port *ap);
-void ata_sas_free_tag(unsigned int tag, struct ata_port *ap);
-#else
-static inline int ata_sas_allocate_tag(struct ata_port *ap)
-{
- return -EOPNOTSUPP;
-}
-static inline void ata_sas_free_tag(unsigned int tag, struct ata_port *ap) { }
-#endif
-
/* libata-acpi.c */
#ifdef CONFIG_ATA_ACPI
extern unsigned int ata_acpi_gtf_filter;
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 2e35505b683c..0117df0fe3c5 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -536,8 +536,8 @@ static int pata_ftide010_probe(struct platform_device *pdev)
return 0;
err_dis_clk:
- if (!IS_ERR(ftide->pclk))
- clk_disable_unprepare(ftide->pclk);
+ clk_disable_unprepare(ftide->pclk);
+
return ret;
}
@@ -547,8 +547,7 @@ static int pata_ftide010_remove(struct platform_device *pdev)
struct ftide010 *ftide = host->private_data;
ata_host_detach(ftide->host);
- if (!IS_ERR(ftide->pclk))
- clk_disable_unprepare(ftide->pclk);
+ clk_disable_unprepare(ftide->pclk);
return 0;
}
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 0c5a51970fbf..014ccb0f45dc 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -77,6 +77,8 @@ static int marvell_cable_detect(struct ata_port *ap)
switch(ap->port_no)
{
case 0:
+ if (!ap->ioaddr.bmdma_addr)
+ return ATA_CBL_PATA_UNK;
if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 3250ef317df6..03b6ae37a578 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -19,11 +19,12 @@
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/libata.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
-#include <asm/prom.h>
#include <asm/mpc52xx.h>
#include <linux/fsl/bestcomm/bestcomm.h>
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 0da58ce20d82..67ef2e26d7df 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -47,11 +47,9 @@
* critical.
*/
-static unsigned long sil680_selreg(struct ata_port *ap, int r)
+static int sil680_selreg(struct ata_port *ap, int r)
{
- unsigned long base = 0xA0 + r;
- base += (ap->port_no << 4);
- return base;
+ return 0xA0 + (ap->port_no << 4) + r;
}
/**
@@ -65,12 +63,9 @@ static unsigned long sil680_selreg(struct ata_port *ap, int r)
* the unit shift.
*/
-static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev, int r)
+static int sil680_seldev(struct ata_port *ap, struct ata_device *adev, int r)
{
- unsigned long base = 0xA0 + r;
- base += (ap->port_no << 4);
- base |= adev->devno ? 2 : 0;
- return base;
+ return 0xA0 + (ap->port_no << 4) + r + (adev->devno << 1);
}
@@ -85,8 +80,9 @@ static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev,
static int sil680_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- unsigned long addr = sil680_selreg(ap, 0);
+ int addr = sil680_selreg(ap, 0);
u8 ata66;
+
pci_read_config_byte(pdev, addr, &ata66);
if (ata66 & 1)
return ATA_CBL_PATA80;
@@ -113,9 +109,9 @@ static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
0x328A, 0x2283, 0x1281, 0x10C3, 0x10C1
};
- unsigned long tfaddr = sil680_selreg(ap, 0x02);
- unsigned long addr = sil680_seldev(ap, adev, 0x04);
- unsigned long addr_mask = 0x80 + 4 * ap->port_no;
+ int tfaddr = sil680_selreg(ap, 0x02);
+ int addr = sil680_seldev(ap, adev, 0x04);
+ int addr_mask = 0x80 + 4 * ap->port_no;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int pio = adev->pio_mode - XFER_PIO_0;
int lowest_pio = pio;
@@ -165,9 +161,9 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
static const u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- unsigned long ma = sil680_seldev(ap, adev, 0x08);
- unsigned long ua = sil680_seldev(ap, adev, 0x0C);
- unsigned long addr_mask = 0x80 + 4 * ap->port_no;
+ int ma = sil680_seldev(ap, adev, 0x08);
+ int ua = sil680_seldev(ap, adev, 0x0C);
+ int addr_mask = 0x80 + 4 * ap->port_no;
int port_shift = adev->devno * 4;
u8 scsc, mode;
u16 multi, ultra;
@@ -219,7 +215,7 @@ static void sil680_sff_exec_command(struct ata_port *ap,
static bool sil680_sff_irq_check(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- unsigned long addr = sil680_selreg(ap, 1);
+ int addr = sil680_selreg(ap, 1);
u8 val;
pci_read_config_byte(pdev, addr, &val);
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 439ca882f73c..215c02d4056a 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -248,9 +248,9 @@ static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev,
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct ata_device *peer = ata_dev_pair(adev);
struct ata_timing t, p;
- static int via_clock = 33333; /* Bus clock in kHZ */
- unsigned long T = 1000000000 / via_clock;
- unsigned long UT = T;
+ const int via_clock = 33333; /* Bus clock in kHz */
+ const int T = 1000000000 / via_clock;
+ int UT = T;
int ut;
int offset = 3 - (2*ap->port_no) - adev->devno;
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index bec33d781ae0..e3263e961045 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -137,7 +137,11 @@ struct sata_dwc_device {
#endif
};
-#define SATA_DWC_QCMD_MAX 32
+/*
+ * Allow one extra slot in the command and DMA management
+ * arrays to account for the libata internal command.
+ */
+#define SATA_DWC_QCMD_MAX (ATA_MAX_QUEUE + 1)
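libata reserves a dedicated tag for its internal command (ATA_TAG_INTERNAL, equal to ATA_MAX_QUEUE), so per-port bookkeeping needs ATA_MAX_QUEUE + 1 entries; deriving SATA_DWC_QCMD_MAX from the core constant keeps the driver's arrays in lockstep with libata instead of hard-coding 32.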
struct sata_dwc_device_port {
struct sata_dwc_device *hsdev;
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index 00e1c7941d0e..b729e9919bb0 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -318,7 +318,6 @@ static int gemini_sata_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct sata_gemini *sg;
struct regmap *map;
- struct resource *res;
enum gemini_muxmode muxmode;
u32 gmode;
u32 gmask;
@@ -329,11 +328,7 @@ static int gemini_sata_probe(struct platform_device *pdev)
return -ENOMEM;
sg->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- sg->base = devm_ioremap_resource(dev, res);
+ sg->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sg->base))
return PTR_ERR(sg->base);
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1d6636ebaac5..f73b836047cf 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -667,6 +667,15 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
core_mask = &cpu_topology[cpu].llc_sibling;
}
+ /*
+ * For systems with no shared cpu-side LLC but with clusters defined,
+ * extend core_mask to cluster_siblings. The sched domain builder will
+ * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
+ */
+ if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
+ cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
+ core_mask = &cpu_topology[cpu].cluster_sibling;
+
return core_mask;
}
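cpumask_subset(a, b) returns true when every CPU in *a is also present in *b, so the check above can only widen core_mask to the cluster mask, never shrink it; on an LLC-less system whose clusters cover the old core mask, MC then spans the cluster and the scheduler discards whichever of MC/CLS is redundant.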
@@ -684,7 +693,7 @@ void update_siblings_masks(unsigned int cpuid)
for_each_online_cpu(cpu) {
cpu_topo = &cpu_topology[cpu];
- if (cpuid_topo->llc_id == cpu_topo->llc_id) {
+ if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) {
cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index af6bea56f4e2..3fc3b5940bb3 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -296,6 +296,7 @@ int driver_deferred_probe_check_state(struct device *dev)
return -EPROBE_DEFER;
}
+EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 94d1789a233e..406a907a4cae 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -735,6 +735,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
size_t offset, u32 opt_flags)
{
struct firmware *fw = NULL;
+ struct cred *kern_cred = NULL;
+ const struct cred *old_cred;
bool nondirect = false;
int ret;
@@ -751,6 +753,18 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (ret <= 0) /* error or already assigned */
goto out;
+ /*
+ * We are about to try to access the firmware file. Because we may have been
+ * called by a driver when serving an unrelated request from userland, we use
+ * the kernel credentials to read the file.
+ */
+ kern_cred = prepare_kernel_cred(NULL);
+ if (!kern_cred) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ old_cred = override_creds(kern_cred);
+
ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
/* Only full reads can support decompression, platform, and sysfs. */
@@ -776,6 +790,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
} else
ret = assign_fw(fw, device);
+ revert_creds(old_cred);
+ put_cred(kern_cred);
+
out:
if (ret < 0) {
fw_abort_batch_reqs(fw);
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index e9d1efcda89b..ac6ad9ab67f9 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -152,9 +152,19 @@ static struct attribute *default_attrs[] = {
NULL
};
+static umode_t topology_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ if (attr == &dev_attr_ppin.attr && !topology_ppin(kobj_to_dev(kobj)->id))
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group topology_attr_group = {
.attrs = default_attrs,
.bin_attrs = bin_attrs,
+ .is_visible = topology_is_visible,
.name = "topology"
};
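The is_visible() hook is consulted once per attribute when the group is registered: returning 0 suppresses creation of that sysfs file, while returning attr->mode keeps the declared permissions. Here it hides the ppin attribute on CPUs for which topology_ppin() reports no Protected Processor Inventory Number.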
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 519b6d38d4df..fdb81f2794cd 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -33,6 +33,22 @@ config BLK_DEV_FD
To compile this driver as a module, choose M here: the
module will be called floppy.
+config BLK_DEV_FD_RAWCMD
+ bool "Support for raw floppy disk commands (DEPRECATED)"
+ depends on BLK_DEV_FD
+ help
+ If you want to use actual physical floppies and expect to do
+ special low-level hardware accesses to them (access and use
+ non-standard formats, for example), then enable this.
+
+ Note that the code enabled by this option is rarely used and
+ might be unstable or insecure, and distros should not enable it.
+
+ Note: FDRAWCMD is deprecated and will be removed from the kernel
+ in the near future.
+
+ If unsure, say N.
+
config AMIGA_FLOPPY
tristate "Amiga floppy support"
depends on AMIGA
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 84d0fcebd6af..749ae1246f4c 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -244,3 +244,5 @@ void aoenet_exit(void);
void aoenet_xmit(struct sk_buff_head *);
int is_aoe_netif(struct net_device *ifp);
int set_aoe_iflist(const char __user *str, size_t size);
+
+extern struct workqueue_struct *aoe_wq;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 8a91fcac6f82..348adf335217 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -435,7 +435,7 @@ err_mempool:
err:
spin_lock_irqsave(&d->lock, flags);
d->flags &= ~DEVFL_GD_NOW;
- schedule_work(&d->work);
+ queue_work(aoe_wq, &d->work);
spin_unlock_irqrestore(&d->lock, flags);
}
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 384073ef2323..d7317425be51 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -968,7 +968,7 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
d->flags |= DEVFL_NEWSIZE;
else
d->flags |= DEVFL_GDALLOC;
- schedule_work(&d->work);
+ queue_work(aoe_wq, &d->work);
}
static void
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index c5753c6bfe80..b381d1c3ef32 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -321,7 +321,7 @@ flush(const char __user *str, size_t cnt, int exiting)
specified = 1;
}
- flush_scheduled_work();
+ flush_workqueue(aoe_wq);
/* pass one: do aoedev_downdev, which might sleep */
restart1:
spin_lock_irqsave(&devlist_lock, flags);
@@ -520,7 +520,7 @@ freetgt(struct aoedev *d, struct aoetgt *t)
void
aoedev_exit(void)
{
- flush_scheduled_work();
+ flush_workqueue(aoe_wq);
flush(NULL, 0, EXITING);
}
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index 1e4e2971171c..6238c4c87cfc 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -16,6 +16,7 @@ MODULE_DESCRIPTION("AoE block/char driver for 2.6.2 and newer 2.6 kernels");
MODULE_VERSION(VERSION);
static struct timer_list timer;
+struct workqueue_struct *aoe_wq;
static void discover_timer(struct timer_list *t)
{
@@ -35,6 +36,7 @@ aoe_exit(void)
aoechr_exit();
aoedev_exit();
aoeblk_exit(); /* free cache after de-allocating bufs */
+ destroy_workqueue(aoe_wq);
}
static int __init
@@ -42,9 +44,13 @@ aoe_init(void)
{
int ret;
+ aoe_wq = alloc_workqueue("aoe_wq", 0, 0);
+ if (!aoe_wq)
+ return -ENOMEM;
+
ret = aoedev_init();
if (ret)
- return ret;
+ goto dev_fail;
ret = aoechr_init();
if (ret)
goto chr_fail;
@@ -77,6 +83,8 @@ aoe_init(void)
aoechr_exit();
chr_fail:
aoedev_exit();
+ dev_fail:
+ destroy_workqueue(aoe_wq);
printk(KERN_INFO "aoe: initialisation failure.\n");
return ret;
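flush_scheduled_work() drains the shared system workqueue, which flushes unrelated work items and risks deadlock if anything in the flush path also runs on that queue; a driver-private workqueue makes the flushes in aoedev.c precise. The lifecycle, reduced to its minimal shape (mirroring aoe_init()/aoe_exit() above):

	struct workqueue_struct *wq;

	wq = alloc_workqueue("aoe_wq", 0, 0);	/* NULL on failure */
	queue_work(wq, &d->work);		/* was schedule_work() */
	flush_workqueue(wq);			/* was flush_scheduled_work() */
	destroy_workqueue(wq);			/* on module exit */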
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 5d819a466e2f..e232cc4fd444 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -303,6 +303,7 @@ static struct atari_floppy_struct {
int ref;
int type;
struct blk_mq_tag_set tag_set;
+ int error_count;
} unit[FD_MAX_UNITS];
#define UD unit[drive]
@@ -705,14 +706,14 @@ static void fd_error( void )
if (!fd_request)
return;
- fd_request->error_count++;
- if (fd_request->error_count >= MAX_ERRORS) {
+ unit[SelectedDrive].error_count++;
+ if (unit[SelectedDrive].error_count >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
fd_end_request_cur(BLK_STS_IOERR);
finish_fdc();
return;
}
- else if (fd_request->error_count == RECALIBRATE_ERRORS) {
+ else if (unit[SelectedDrive].error_count == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
if (SelectedDrive != -1)
SUD.track = -1;
@@ -1491,7 +1492,7 @@ static void setup_req_params( int drive )
ReqData = ReqBuffer + 512 * ReqCnt;
if (UseTrackbuffer)
- read_track = (ReqCmd == READ && fd_request->error_count == 0);
+ read_track = (ReqCmd == READ && unit[drive].error_count == 0);
else
read_track = 0;
@@ -1520,6 +1521,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_RESOURCE;
}
fd_request = bd->rq;
+ unit[drive].error_count = 0;
blk_mq_start_request(fd_request);
atari_disable_irq( IRQ_MFP_FDC );
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index df25eecf80af..9e060e49b3f8 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -683,7 +683,7 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
}
}
- want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
+ want = PFN_UP(words*sizeof(long));
have = b->bm_number_of_pages;
if (want == have) {
D_ASSERT(device, b->bm_pages != NULL);
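PFN_UP() from include/linux/pfn.h performs the same round-up-to-whole-pages computation the old expression spelled out by hand:

	#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

so this conversion, like the matching ones in drbd_receiver.c and drbd_worker.c further down, is purely cosmetic.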
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 4b55e864a0a3..4d3efaa20b7b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1638,22 +1638,22 @@ struct sib_info {
};
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
-extern void notify_resource_state(struct sk_buff *,
+extern int notify_resource_state(struct sk_buff *,
unsigned int,
struct drbd_resource *,
struct resource_info *,
enum drbd_notification_type);
-extern void notify_device_state(struct sk_buff *,
+extern int notify_device_state(struct sk_buff *,
unsigned int,
struct drbd_device *,
struct device_info *,
enum drbd_notification_type);
-extern void notify_connection_state(struct sk_buff *,
+extern int notify_connection_state(struct sk_buff *,
unsigned int,
struct drbd_connection *,
struct connection_info *,
enum drbd_notification_type);
-extern void notify_peer_device_state(struct sk_buff *,
+extern int notify_peer_device_state(struct sk_buff *,
unsigned int,
struct drbd_peer_device *,
struct peer_device_info *,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9676a1d214bc..2887350ae010 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -903,31 +903,6 @@ void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
}
}
-/* communicated if (agreed_features & DRBD_FF_WSAME) */
-static void
-assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
- struct request_queue *q)
-{
- if (q) {
- p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
- p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
- p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
- p->qlim->io_min = cpu_to_be32(queue_io_min(q));
- p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
- p->qlim->discard_enabled = blk_queue_discard(q);
- p->qlim->write_same_capable = 0;
- } else {
- q = device->rq_queue;
- p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
- p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
- p->qlim->alignment_offset = 0;
- p->qlim->io_min = cpu_to_be32(queue_io_min(q));
- p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
- p->qlim->discard_enabled = 0;
- p->qlim->write_same_capable = 0;
- }
-}
-
int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
struct drbd_device *device = peer_device->device;
@@ -949,7 +924,9 @@ int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enu
memset(p, 0, packet_size);
if (get_ldev_if_state(device, D_NEGOTIATING)) {
- struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
+ struct block_device *bdev = device->ldev->backing_bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+
d_size = drbd_get_max_capacity(device->ldev);
rcu_read_lock();
u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
@@ -957,14 +934,32 @@ int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enu
q_order_type = drbd_queue_order_type(device);
max_bio_size = queue_max_hw_sectors(q) << 9;
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
- assign_p_sizes_qlim(device, p, q);
+ p->qlim->physical_block_size =
+ cpu_to_be32(bdev_physical_block_size(bdev));
+ p->qlim->logical_block_size =
+ cpu_to_be32(bdev_logical_block_size(bdev));
+ p->qlim->alignment_offset =
+ cpu_to_be32(bdev_alignment_offset(bdev));
+ p->qlim->io_min = cpu_to_be32(bdev_io_min(bdev));
+ p->qlim->io_opt = cpu_to_be32(bdev_io_opt(bdev));
+ p->qlim->discard_enabled = !!bdev_max_discard_sectors(bdev);
put_ldev(device);
} else {
+ struct request_queue *q = device->rq_queue;
+
+ p->qlim->physical_block_size =
+ cpu_to_be32(queue_physical_block_size(q));
+ p->qlim->logical_block_size =
+ cpu_to_be32(queue_logical_block_size(q));
+ p->qlim->alignment_offset = 0;
+ p->qlim->io_min = cpu_to_be32(queue_io_min(q));
+ p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
+ p->qlim->discard_enabled = 0;
+
d_size = 0;
u_size = 0;
q_order_type = QUEUE_ORDERED_NONE;
max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
- assign_p_sizes_qlim(device, p, NULL);
}
if (peer_device->connection->agreed_pro_version <= 94)
@@ -2719,6 +2714,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
sprintf(disk->disk_name, "drbd%d", minor);
disk->private_data = device;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
blk_queue_write_cache(disk->queue, true, true);
/* Setting the max_hw_sectors to an odd value of 8kibyte here
This triggers a max_bio_size message upon first attach or connect */
@@ -2773,12 +2769,12 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
if (init_submitter(device)) {
err = ERR_NOMEM;
- goto out_idr_remove_vol;
+ goto out_idr_remove_from_resource;
}
err = add_disk(disk);
if (err)
- goto out_idr_remove_vol;
+ goto out_idr_remove_from_resource;
/* inherit the connection state */
device->state.conn = first_connection(resource)->cstate;
@@ -2792,8 +2788,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
drbd_debugfs_device_add(device);
return NO_ERROR;
-out_idr_remove_vol:
- idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
for_each_connection(connection, resource) {
peer_device = idr_remove(&connection->peer_devices, vnr);
@@ -3587,9 +3581,8 @@ const char *cmdname(enum drbd_packet cmd)
* when we want to support more than
* one PRO_VERSION */
static const char *cmdnames[] = {
+
[P_DATA] = "Data",
- [P_WSAME] = "WriteSame",
- [P_TRIM] = "Trim",
[P_DATA_REPLY] = "DataReply",
[P_RS_DATA_REPLY] = "RSDataReply",
[P_BARRIER] = "Barrier",
@@ -3600,7 +3593,6 @@ const char *cmdname(enum drbd_packet cmd)
[P_DATA_REQUEST] = "DataRequest",
[P_RS_DATA_REQUEST] = "RSDataRequest",
[P_SYNC_PARAM] = "SyncParam",
- [P_SYNC_PARAM89] = "SyncParam89",
[P_PROTOCOL] = "ReportProtocol",
[P_UUIDS] = "ReportUUIDs",
[P_SIZES] = "ReportSizes",
@@ -3608,6 +3600,7 @@ const char *cmdname(enum drbd_packet cmd)
[P_SYNC_UUID] = "ReportSyncUUID",
[P_AUTH_CHALLENGE] = "AuthChallenge",
[P_AUTH_RESPONSE] = "AuthResponse",
+ [P_STATE_CHG_REQ] = "StateChgRequest",
[P_PING] = "Ping",
[P_PING_ACK] = "PingAck",
[P_RECV_ACK] = "RecvAck",
@@ -3618,23 +3611,25 @@ const char *cmdname(enum drbd_packet cmd)
[P_NEG_DREPLY] = "NegDReply",
[P_NEG_RS_DREPLY] = "NegRSDReply",
[P_BARRIER_ACK] = "BarrierAck",
- [P_STATE_CHG_REQ] = "StateChgRequest",
[P_STATE_CHG_REPLY] = "StateChgReply",
[P_OV_REQUEST] = "OVRequest",
[P_OV_REPLY] = "OVReply",
[P_OV_RESULT] = "OVResult",
[P_CSUM_RS_REQUEST] = "CsumRSRequest",
[P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
+ [P_SYNC_PARAM89] = "SyncParam89",
[P_COMPRESSED_BITMAP] = "CBitmap",
[P_DELAY_PROBE] = "DelayProbe",
[P_OUT_OF_SYNC] = "OutOfSync",
- [P_RETRY_WRITE] = "RetryWrite",
[P_RS_CANCEL] = "RSCancel",
[P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
[P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
[P_PROTOCOL_UPDATE] = "protocol_update",
+ [P_TRIM] = "Trim",
[P_RS_THIN_REQ] = "rs_thin_req",
[P_RS_DEALLOCATED] = "rs_deallocated",
+ [P_WSAME] = "WriteSame",
+ [P_ZEROES] = "Zeroes",
/* enum drbd_packet, but not commands - obsoleted flags:
* P_MAY_IGNORE
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 02030c9c4d3b..013d355a2033 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -770,6 +770,7 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
struct set_role_parms parms;
int err;
enum drbd_ret_code retcode;
+ enum drbd_state_rv rv;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
@@ -790,14 +791,14 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&adm_ctx.resource->adm_mutex);
if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
- retcode = (enum drbd_ret_code)drbd_set_role(adm_ctx.device,
- R_PRIMARY, parms.assume_uptodate);
+ rv = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
else
- retcode = (enum drbd_ret_code)drbd_set_role(adm_ctx.device,
- R_SECONDARY, 0);
+ rv = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
mutex_unlock(&adm_ctx.resource->adm_mutex);
genl_lock();
+ drbd_adm_finish(&adm_ctx, info, rv);
+ return 0;
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
@@ -1204,50 +1205,40 @@ static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
}
static void decide_on_discard_support(struct drbd_device *device,
- struct request_queue *q,
- struct request_queue *b,
- bool discard_zeroes_if_aligned)
+ struct drbd_backing_dev *bdev)
{
- /* q = drbd device queue (device->rq_queue)
- * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue),
- * or NULL if diskless
- */
- struct drbd_connection *connection = first_peer_device(device)->connection;
- bool can_do = b ? blk_queue_discard(b) : true;
-
- if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
- can_do = false;
- drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
- }
- if (can_do) {
- /* We don't care for the granularity, really.
- * Stacking limits below should fix it for the local
- * device. Whether or not it is a suitable granularity
- * on the remote device is not our problem, really. If
- * you care, you need to use devices with similar
- * topology on all peers. */
- blk_queue_discard_granularity(q, 512);
- q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
- q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
- } else {
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
- blk_queue_discard_granularity(q, 0);
- q->limits.max_discard_sectors = 0;
- q->limits.max_write_zeroes_sectors = 0;
- }
-}
+ struct drbd_connection *connection =
+ first_peer_device(device)->connection;
+ struct request_queue *q = device->rq_queue;
-static void fixup_discard_if_not_supported(struct request_queue *q)
-{
- /* To avoid confusion, if this queue does not support discard, clear
- * max_discard_sectors, which is what lsblk -D reports to the user.
- * Older kernels got this wrong in "stack limits".
- * */
- if (!blk_queue_discard(q)) {
- blk_queue_max_discard_sectors(q, 0);
- blk_queue_discard_granularity(q, 0);
+ if (bdev && !bdev_max_discard_sectors(bdev->backing_bdev))
+ goto not_supported;
+
+ if (connection->cstate >= C_CONNECTED &&
+ !(connection->agreed_features & DRBD_FF_TRIM)) {
+ drbd_info(connection,
+ "peer DRBD too old, does not support TRIM: disabling discards\n");
+ goto not_supported;
}
+
+ /*
+ * We don't care about the granularity, really.
+ *
+ * Stacking limits below should fix it for the local device. Whether or
+ * not it is a suitable granularity on the remote device is not our
+ * problem, really. If you care, you need to use devices with similar
+ * topology on all peers.
+ */
+ blk_queue_discard_granularity(q, 512);
+ q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
+ q->limits.max_write_zeroes_sectors =
+ drbd_max_discard_sectors(connection);
+ return;
+
+not_supported:
+ blk_queue_discard_granularity(q, 0);
+ q->limits.max_discard_sectors = 0;
+ q->limits.max_write_zeroes_sectors = 0;
}
static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
@@ -1273,7 +1264,6 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
unsigned int max_segments = 0;
struct request_queue *b = NULL;
struct disk_conf *dc;
- bool discard_zeroes_if_aligned = true;
if (bdev) {
b = bdev->backing_bdev->bd_disk->queue;
@@ -1282,7 +1272,6 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
rcu_read_lock();
dc = rcu_dereference(device->ldev->disk_conf);
max_segments = dc->max_bio_bvecs;
- discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
rcu_read_unlock();
blk_set_stacking_limits(&q->limits);
@@ -1292,13 +1281,12 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
/* This is the workaround for "bio would need to, but cannot, be split" */
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
blk_queue_segment_boundary(q, PAGE_SIZE-1);
- decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
+ decide_on_discard_support(device, bdev);
if (b) {
blk_stack_limits(&q->limits, &b->limits, 0);
disk_update_readahead(device->vdisk);
}
- fixup_discard_if_not_supported(q);
fixup_write_zeroes(device, q);
}
@@ -1437,14 +1425,14 @@ static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
struct drbd_backing_dev *nbc)
{
- struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;
+ struct block_device *bdev = nbc->backing_bdev;
if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
if (disk_conf->al_extents > drbd_al_extents_max(nbc))
disk_conf->al_extents = drbd_al_extents_max(nbc);
- if (!blk_queue_discard(q)) {
+ if (!bdev_max_discard_sectors(bdev)) {
if (disk_conf->rs_discard_granularity) {
disk_conf->rs_discard_granularity = 0; /* disable feature */
drbd_info(device, "rs_discard_granularity feature disabled\n");
@@ -1453,16 +1441,19 @@ static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *dis
if (disk_conf->rs_discard_granularity) {
int orig_value = disk_conf->rs_discard_granularity;
+ sector_t discard_size = bdev_max_discard_sectors(bdev) << 9;
+ unsigned int discard_granularity = bdev_discard_granularity(bdev);
int remainder;
- if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
- disk_conf->rs_discard_granularity = q->limits.discard_granularity;
+ if (discard_granularity > disk_conf->rs_discard_granularity)
+ disk_conf->rs_discard_granularity = discard_granularity;
- remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
+ remainder = disk_conf->rs_discard_granularity %
+ discard_granularity;
disk_conf->rs_discard_granularity += remainder;
- if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
- disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;
+ if (disk_conf->rs_discard_granularity > discard_size)
+ disk_conf->rs_discard_granularity = discard_size;
if (disk_conf->rs_discard_granularity != orig_value)
drbd_info(device, "rs_discard_granularity changed to %d\n",
@@ -1611,8 +1602,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
drbd_send_sync_param(peer_device);
}
- synchronize_rcu();
- kfree(old_disk_conf);
+ kvfree_rcu(old_disk_conf);
kfree(old_plan);
mod_timer(&device->request_timer, jiffies + HZ);
goto success;
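The one-argument form of kvfree_rcu() waits for a grace period and then frees the pointer, making it a direct replacement for the synchronize_rcu(); kfree(); pairs removed here and in the hunks below. Note that this form may sleep, which is acceptable in these configuration paths.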
@@ -2443,8 +2433,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
mutex_unlock(&connection->resource->conf_update);
mutex_unlock(&connection->data.mutex);
- synchronize_rcu();
- kfree(old_net_conf);
+ kvfree_rcu(old_net_conf);
if (connection->cstate >= C_WF_REPORT_PARAMS) {
struct drbd_peer_device *peer_device;
@@ -2502,6 +2491,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
struct drbd_resource *resource;
struct drbd_connection *connection;
enum drbd_ret_code retcode;
+ enum drbd_state_rv rv;
int i;
int err;
@@ -2621,12 +2611,11 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
}
rcu_read_unlock();
- retcode = (enum drbd_ret_code)conn_request_state(connection,
- NS(conn, C_UNCONNECTED), CS_VERBOSE);
+ rv = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
conn_reconfig_done(connection);
mutex_unlock(&adm_ctx.resource->adm_mutex);
- drbd_adm_finish(&adm_ctx, info, retcode);
+ drbd_adm_finish(&adm_ctx, info, rv);
return 0;
fail:
@@ -2734,11 +2723,12 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&adm_ctx.resource->adm_mutex);
rv = conn_try_disconnect(connection, parms.force_disconnect);
- if (rv < SS_SUCCESS)
- retcode = (enum drbd_ret_code)rv;
- else
- retcode = NO_ERROR;
mutex_unlock(&adm_ctx.resource->adm_mutex);
+ if (rv < SS_SUCCESS) {
+ drbd_adm_finish(&adm_ctx, info, rv);
+ return 0;
+ }
+ retcode = NO_ERROR;
fail:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
@@ -2857,8 +2847,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
new_disk_conf->disk_size = (sector_t)rs.resize_size;
rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
mutex_unlock(&device->resource->conf_update);
- synchronize_rcu();
- kfree(old_disk_conf);
+ kvfree_rcu(old_disk_conf);
new_disk_conf = NULL;
}
@@ -4549,7 +4538,7 @@ static int nla_put_notification_header(struct sk_buff *msg,
return drbd_notification_header_to_skb(msg, &nh, true);
}
-void notify_resource_state(struct sk_buff *skb,
+int notify_resource_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_resource *resource,
struct resource_info *resource_info,
@@ -4591,16 +4580,17 @@ void notify_resource_state(struct sk_buff *skb,
if (err && err != -ESRCH)
goto failed;
}
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
+ return err;
}
-void notify_device_state(struct sk_buff *skb,
+int notify_device_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_device *device,
struct device_info *device_info,
@@ -4640,16 +4630,17 @@ void notify_device_state(struct sk_buff *skb,
if (err && err != -ESRCH)
goto failed;
}
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
+ return err;
}
-void notify_connection_state(struct sk_buff *skb,
+int notify_connection_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_connection *connection,
struct connection_info *connection_info,
@@ -4689,16 +4680,17 @@ void notify_connection_state(struct sk_buff *skb,
if (err && err != -ESRCH)
goto failed;
}
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
+ return err;
}
-void notify_peer_device_state(struct sk_buff *skb,
+int notify_peer_device_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_peer_device *peer_device,
struct peer_device_info *peer_device_info,
@@ -4739,13 +4731,14 @@ void notify_peer_device_state(struct sk_buff *skb,
if (err && err != -ESRCH)
goto failed;
}
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
+ return err;
}
void notify_helper(enum drbd_notification_type type,
@@ -4796,7 +4789,7 @@ fail:
err, seq);
}
-static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
+static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
{
struct drbd_genlmsghdr *dh;
int err;
@@ -4810,11 +4803,12 @@ static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
if (nla_put_notification_header(skb, NOTIFY_EXISTS))
goto nla_put_failure;
genlmsg_end(skb, dh);
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
pr_err("Error %d sending event. Event seq:%u\n", err, seq);
+ return err;
}
static void free_state_changes(struct list_head *list)
@@ -4841,6 +4835,7 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
unsigned int seq = cb->args[2];
unsigned int n;
enum drbd_notification_type flags = 0;
+ int err = 0;
/* There is no need to take notification_mutex here: it doesn't
matter if the initial state events mix with later state change
@@ -4849,32 +4844,32 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[5]--;
if (cb->args[5] == 1) {
- notify_initial_state_done(skb, seq);
+ err = notify_initial_state_done(skb, seq);
goto out;
}
n = cb->args[4]++;
if (cb->args[4] < cb->args[3])
flags |= NOTIFY_CONTINUES;
if (n < 1) {
- notify_resource_state_change(skb, seq, state_change->resource,
+ err = notify_resource_state_change(skb, seq, state_change->resource,
NOTIFY_EXISTS | flags);
goto next;
}
n--;
if (n < state_change->n_connections) {
- notify_connection_state_change(skb, seq, &state_change->connections[n],
+ err = notify_connection_state_change(skb, seq, &state_change->connections[n],
NOTIFY_EXISTS | flags);
goto next;
}
n -= state_change->n_connections;
if (n < state_change->n_devices) {
- notify_device_state_change(skb, seq, &state_change->devices[n],
+ err = notify_device_state_change(skb, seq, &state_change->devices[n],
NOTIFY_EXISTS | flags);
goto next;
}
n -= state_change->n_devices;
if (n < state_change->n_devices * state_change->n_connections) {
- notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
+ err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
NOTIFY_EXISTS | flags);
goto next;
}
@@ -4889,7 +4884,10 @@ next:
cb->args[4] = 0;
}
out:
- return skb->len;
+ if (err)
+ return err;
+ else
+ return skb->len;
}
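The return value of a netlink dump callback drives the core: a positive value (skb->len) asks to be called again for more data, zero ends the dump, and a negative value aborts it with that error. Propagating err from the notify_*() helpers, which is why they were converted to return int throughout this series, turns nla_put failures into visible errors rather than silently truncated dumps.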
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 08da922f81d1..6762be53f409 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -364,7 +364,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
struct drbd_device *device = peer_device->device;
struct drbd_peer_request *peer_req;
struct page *page = NULL;
- unsigned nr_pages = (payload_size + PAGE_SIZE -1) >> PAGE_SHIFT;
+ unsigned int nr_pages = PFN_UP(payload_size);
if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
return NULL;
@@ -1511,7 +1511,6 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags)
{
struct block_device *bdev = device->ldev->backing_bdev;
- struct request_queue *q = bdev_get_queue(bdev);
sector_t tmp, nr;
unsigned int max_discard_sectors, granularity;
int alignment;
@@ -1521,10 +1520,10 @@ int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, u
goto zero_out;
/* Zero-sector (unknown) and one-sector granularities are the same. */
- granularity = max(q->limits.discard_granularity >> 9, 1U);
+ granularity = max(bdev_discard_granularity(bdev) >> 9, 1U);
alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
- max_discard_sectors = min(q->limits.max_discard_sectors, (1U << 22));
+ max_discard_sectors = min(bdev_max_discard_sectors(bdev), (1U << 22));
max_discard_sectors -= max_discard_sectors % granularity;
if (unlikely(!max_discard_sectors))
goto zero_out;
@@ -1548,7 +1547,8 @@ int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, u
start = tmp;
}
while (nr_sectors >= max_discard_sectors) {
- err |= blkdev_issue_discard(bdev, start, max_discard_sectors, GFP_NOIO, 0);
+ err |= blkdev_issue_discard(bdev, start, max_discard_sectors,
+ GFP_NOIO);
nr_sectors -= max_discard_sectors;
start += max_discard_sectors;
}
@@ -1560,7 +1560,7 @@ int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, u
nr = nr_sectors;
nr -= (unsigned int)nr % granularity;
if (nr) {
- err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0);
+ err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO);
nr_sectors -= nr;
start += nr;
}
@@ -1575,11 +1575,10 @@ int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, u
static bool can_do_reliable_discards(struct drbd_device *device)
{
- struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
struct disk_conf *dc;
bool can_do;
- if (!blk_queue_discard(q))
+ if (!bdev_max_discard_sectors(device->ldev->backing_bdev))
return false;
rcu_read_lock();
@@ -1629,9 +1628,9 @@ int drbd_submit_peer_request(struct drbd_device *device,
struct bio *bio;
struct page *page = peer_req->pages;
sector_t sector = peer_req->i.sector;
- unsigned data_size = peer_req->i.size;
- unsigned n_bios = 0;
- unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
+ unsigned int data_size = peer_req->i.size;
+ unsigned int n_bios = 0;
+ unsigned int nr_pages = PFN_UP(data_size);
/* TRIM/DISCARD: for now, always use the helper function
* blkdev_issue_zeroout(..., discard=true).
@@ -3751,8 +3750,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
drbd_info(connection, "peer data-integrity-alg: %s\n",
integrity_alg[0] ? integrity_alg : "(none)");
- synchronize_rcu();
- kfree(old_net_conf);
+ kvfree_rcu(old_net_conf);
return 0;
disconnect_rcu_unlock:
@@ -3903,7 +3901,6 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
drbd_err(device, "verify-alg of wrong size, "
"peer wants %u, accepting only up to %u byte\n",
data_size, SHARED_SECRET_MAX);
- err = -EIO;
goto reconnect;
}
@@ -4121,8 +4118,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
mutex_unlock(&connection->resource->conf_update);
- synchronize_rcu();
- kfree(old_disk_conf);
+ kvfree_rcu(old_disk_conf);
drbd_info(device, "Peer sets u_size to %lu sectors (old: %lu)\n",
(unsigned long)p_usize, (unsigned long)my_usize);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 75be0e16770a..e64bcfba30ef 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -922,7 +922,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
switch (rbm) {
case RB_CONGESTED_REMOTE:
- return 0;
+ return false;
case RB_LEAST_PENDING:
return atomic_read(&device->local_cnt) >
atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index b8a27818ab3f..3f7bf9f2d874 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -1537,7 +1537,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
return rv;
}
-void notify_resource_state_change(struct sk_buff *skb,
+int notify_resource_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_resource_state_change *resource_state_change,
enum drbd_notification_type type)
@@ -1550,10 +1550,10 @@ void notify_resource_state_change(struct sk_buff *skb,
.res_susp_fen = resource_state_change->susp_fen[NEW],
};
- notify_resource_state(skb, seq, resource, &resource_info, type);
+ return notify_resource_state(skb, seq, resource, &resource_info, type);
}
-void notify_connection_state_change(struct sk_buff *skb,
+int notify_connection_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_connection_state_change *connection_state_change,
enum drbd_notification_type type)
@@ -1564,10 +1564,10 @@ void notify_connection_state_change(struct sk_buff *skb,
.conn_role = connection_state_change->peer_role[NEW],
};
- notify_connection_state(skb, seq, connection, &connection_info, type);
+ return notify_connection_state(skb, seq, connection, &connection_info, type);
}
-void notify_device_state_change(struct sk_buff *skb,
+int notify_device_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_device_state_change *device_state_change,
enum drbd_notification_type type)
@@ -1577,10 +1577,10 @@ void notify_device_state_change(struct sk_buff *skb,
.dev_disk_state = device_state_change->disk_state[NEW],
};
- notify_device_state(skb, seq, device, &device_info, type);
+ return notify_device_state(skb, seq, device, &device_info, type);
}
-void notify_peer_device_state_change(struct sk_buff *skb,
+int notify_peer_device_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_peer_device_state_change *p,
enum drbd_notification_type type)
@@ -1594,7 +1594,7 @@ void notify_peer_device_state_change(struct sk_buff *skb,
.peer_resync_susp_dependency = p->resync_susp_dependency[NEW],
};
- notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
+ return notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
}
static void broadcast_state_change(struct drbd_state_change *state_change)
@@ -1602,7 +1602,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
bool resource_state_has_changed;
unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
- void (*last_func)(struct sk_buff *, unsigned int, void *,
+ int (*last_func)(struct sk_buff *, unsigned int, void *,
enum drbd_notification_type) = NULL;
void *last_arg = NULL;
@@ -2071,8 +2071,7 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
conn_free_crypto(connection);
mutex_unlock(&connection->resource->conf_update);
- synchronize_rcu();
- kfree(old_conf);
+ kvfree_rcu(old_conf);
}
if (ns_max.susp_fen) {
diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h
index ba80f612d6ab..d5b0479bc9a6 100644
--- a/drivers/block/drbd/drbd_state_change.h
+++ b/drivers/block/drbd/drbd_state_change.h
@@ -44,19 +44,19 @@ extern struct drbd_state_change *remember_old_state(struct drbd_resource *, gfp_
extern void copy_old_to_new_state_change(struct drbd_state_change *);
extern void forget_state_change(struct drbd_state_change *);
-extern void notify_resource_state_change(struct sk_buff *,
+extern int notify_resource_state_change(struct sk_buff *,
unsigned int,
struct drbd_resource_state_change *,
enum drbd_notification_type type);
-extern void notify_connection_state_change(struct sk_buff *,
+extern int notify_connection_state_change(struct sk_buff *,
unsigned int,
struct drbd_connection_state_change *,
enum drbd_notification_type type);
-extern void notify_device_state_change(struct sk_buff *,
+extern int notify_device_state_change(struct sk_buff *,
unsigned int,
struct drbd_device_state_change *,
enum drbd_notification_type type);
-extern void notify_peer_device_state_change(struct sk_buff *,
+extern int notify_peer_device_state_change(struct sk_buff *,
unsigned int,
struct drbd_peer_device_state_change *,
enum drbd_notification_type type);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 0f9956f4e9c4..af3051dd8912 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1030,7 +1030,7 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_
{
if (drbd_peer_req_has_active_page(peer_req)) {
/* This might happen if sendpage() has not finished */
- int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
+ int i = PFN_UP(peer_req->i.size);
atomic_add(i, &device->pp_in_use_by_net);
atomic_sub(i, &device->pp_in_use);
spin_lock_irq(&device->resource->req_lock);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 8c647532e3ce..015841f50f4e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -509,8 +509,8 @@ static unsigned long fdc_busy;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(command_done);
-/* Errors during formatting are counted here. */
-static int format_errors;
+/* errors encountered on the current (or last) request */
+static int floppy_errors;
/* Format request descriptor. */
static struct format_descr format_req;
@@ -530,7 +530,6 @@ static struct format_descr format_req;
static char *floppy_track_buffer;
static int max_buffer_sectors;
-static int *errors;
typedef void (*done_f)(int);
static const struct cont_t {
void (*interrupt)(void);
@@ -1455,7 +1454,7 @@ static int interpret_errors(void)
if (drive_params[current_drive].flags & FTD_MSG)
DPRINT("Over/Underrun - retrying\n");
bad = 0;
- } else if (*errors >= drive_params[current_drive].max_errors.reporting) {
+ } else if (floppy_errors >= drive_params[current_drive].max_errors.reporting) {
print_errors();
}
if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC)
@@ -2095,7 +2094,7 @@ static void bad_flp_intr(void)
if (!next_valid_format(current_drive))
return;
}
- err_count = ++(*errors);
+ err_count = ++floppy_errors;
INFBOUND(write_errors[current_drive].badness, err_count);
if (err_count > drive_params[current_drive].max_errors.abort)
cont->done(0);
@@ -2241,9 +2240,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
return -EINVAL;
}
format_req = *tmp_format_req;
- format_errors = 0;
cont = &format_cont;
- errors = &format_errors;
+ floppy_errors = 0;
ret = wait_til_done(redo_format, true);
if (ret == -EINTR)
return -EINTR;
@@ -2759,10 +2757,11 @@ static int set_next_request(void)
current_req = list_first_entry_or_null(&floppy_reqs, struct request,
queuelist);
if (current_req) {
- current_req->error_count = 0;
+ floppy_errors = 0;
list_del_init(&current_req->queuelist);
+ return 1;
}
- return current_req != NULL;
+ return 0;
}
/* Starts or continues processing request. Will automatically unlock the
@@ -2821,7 +2820,6 @@ do_request:
_floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format];
} else
probing = 0;
- errors = &(current_req->error_count);
tmp = make_raw_rw_request();
if (tmp < 2) {
request_done(tmp);
@@ -2982,6 +2980,8 @@ static const char *drive_name(int type, int drive)
return "(null)";
}
+#ifdef CONFIG_BLK_DEV_FD_RAWCMD
+
/* raw commands */
static void raw_cmd_done(int flag)
{
@@ -3181,6 +3181,35 @@ static int raw_cmd_ioctl(int cmd, void __user *param)
return ret;
}
+static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
+ void __user *param)
+{
+ int ret;
+
+ pr_warn_once("Note: FDRAWCMD is deprecated and will be removed from the kernel in the near future.\n");
+
+ if (type)
+ return -EINVAL;
+ if (lock_fdc(drive))
+ return -EINTR;
+ set_floppy(drive);
+ ret = raw_cmd_ioctl(cmd, param);
+ if (ret == -EINTR)
+ return -EINTR;
+ process_fd_request();
+ return ret;
+}
+
+#else /* CONFIG_BLK_DEV_FD_RAWCMD */
+
+static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
+ void __user *param)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+
static int invalidate_drive(struct block_device *bdev)
{
/* invalidate the buffer track to force a reread */
@@ -3369,7 +3398,6 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
{
int drive = (long)bdev->bd_disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
- int i;
int ret;
int size;
union inparam {
@@ -3520,16 +3548,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
outparam = &write_errors[drive];
break;
case FDRAWCMD:
- if (type)
- return -EINVAL;
- if (lock_fdc(drive))
- return -EINTR;
- set_floppy(drive);
- i = raw_cmd_ioctl(cmd, (void __user *)param);
- if (i == -EINTR)
- return -EINTR;
- process_fd_request();
- return i;
+ return floppy_raw_cmd_ioctl(type, drive, cmd, (void __user *)param);
case FDTWADDLE:
if (lock_fdc(drive))
return -EINTR;
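[Editor's note: the floppy change above moves the FDRAWCMD body behind CONFIG_BLK_DEV_FD_RAWCMD, with a stub returning -EOPNOTSUPP when the option is off, so the ioctl dispatcher needs no #ifdef. A plain-C sketch of that compile-time gating pattern, with illustrative names:]

#include <errno.h>
#include <stdio.h>

#ifdef HAVE_RAW_CMD
static int raw_cmd(int drive)
{
	printf("issuing raw command to drive %d\n", drive);
	return 0;
}
#else
static int raw_cmd(int drive)
{
	(void)drive;
	return -EOPNOTSUPP;	/* feature compiled out */
}
#endif

int main(void)
{
	printf("raw_cmd: %d\n", raw_cmd(0));
	return 0;
}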
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index a58595f5ee2c..e2cb51810e89 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1,54 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * linux/drivers/block/loop.c
- *
- * Written by Theodore Ts'o, 3/29/93
- *
- * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
- * permitted under the GNU General Public License.
- *
- * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
- * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
- *
- * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
- * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
- *
- * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
- *
- * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
- *
- * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
- *
- * Loadable modules and other fixes by AK, 1998
- *
- * Make real block number available to downstream transfer functions, enables
- * CBC (and relatives) mode encryption requiring unique IVs per data block.
- * Reed H. Petty, rhp@draper.net
- *
- * Maximum number of loop devices now dynamic via max_loop module parameter.
- * Russell Kroll <rkroll@exploits.org> 19990701
- *
- * Maximum number of loop devices when compiled-in now selectable by passing
- * max_loop=<1-255> to the kernel on boot.
- * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
- *
- * Completely rewrite request handling to be make_request_fn style and
- * non blocking, pushing work to a helper thread. Lots of fixes from
- * Al Viro too.
- * Jens Axboe <axboe@suse.de>, Nov 2000
- *
- * Support up to 256 loop devices
- * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
- *
- * Support for falling back on the write file operation when the address space
- * operations write_begin is not available on the backing filesystem.
- * Anton Altaparmakov, 16 Feb 2005
- *
- * Still To Fix:
- * - Advisory locking is ignored here.
- * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
- *
+ * Copyright 1993 by Theodore Ts'o.
*/
-
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
@@ -59,7 +12,6 @@
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
-#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
@@ -80,10 +32,62 @@
#include <linux/blk-cgroup.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
+#include <linux/uaccess.h>
+#include <linux/blk-mq.h>
+#include <linux/spinlock.h>
+#include <uapi/linux/loop.h>
+
+/* Possible states of device */
+enum {
+ Lo_unbound,
+ Lo_bound,
+ Lo_rundown,
+ Lo_deleting,
+};
-#include "loop.h"
+struct loop_func_table;
+
+struct loop_device {
+ int lo_number;
+ loff_t lo_offset;
+ loff_t lo_sizelimit;
+ int lo_flags;
+ char lo_file_name[LO_NAME_SIZE];
+
+ struct file * lo_backing_file;
+ struct block_device *lo_device;
+
+ gfp_t old_gfp_mask;
+
+ spinlock_t lo_lock;
+ int lo_state;
+ spinlock_t lo_work_lock;
+ struct workqueue_struct *workqueue;
+ struct work_struct rootcg_work;
+ struct list_head rootcg_cmd_list;
+ struct list_head idle_worker_list;
+ struct rb_root worker_tree;
+ struct timer_list timer;
+ bool use_dio;
+ bool sysfs_inited;
+
+ struct request_queue *lo_queue;
+ struct blk_mq_tag_set tag_set;
+ struct gendisk *lo_disk;
+ struct mutex lo_mutex;
+ bool idr_visible;
+};
-#include <linux/uaccess.h>
+struct loop_cmd {
+ struct list_head list_entry;
+ bool use_aio; /* use AIO interface to handle I/O */
+ atomic_t ref; /* only for aio */
+ long ret;
+ struct kiocb iocb;
+ struct bio_vec *bvec;
+ struct cgroup_subsys_state *blkcg_css;
+ struct cgroup_subsys_state *memcg_css;
+};
#define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ)
#define LOOP_DEFAULT_HW_Q_DEPTH (128)
@@ -314,15 +318,12 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
mode |= FALLOC_FL_KEEP_SIZE;
- if (!blk_queue_discard(lo->lo_queue)) {
- ret = -EOPNOTSUPP;
- goto out;
- }
+ if (!bdev_max_discard_sectors(lo->lo_device))
+ return -EOPNOTSUPP;
ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
- ret = -EIO;
- out:
+ return -EIO;
return ret;
}
@@ -572,6 +573,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!file)
return -EBADF;
+
+ /* suppress uevents while reconfiguring the device */
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
+
is_loop = is_loop_device(file);
error = loop_global_lock_killable(lo, is_loop);
if (error)
@@ -626,13 +631,18 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
fput(old_file);
if (partscan)
loop_reread_partitions(lo);
- return 0;
+
+ error = 0;
+done:
+ /* enable and uncork uevent now that we are done */
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+ return error;
out_err:
loop_global_unlock(lo, is_loop);
out_putf:
fput(file);
- return error;
+ goto done;
}
/* loop sysfs attributes */
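[Editor's note: loop_change_fd() above corks uevents before reconfiguring and funnels every exit — success and the out_err/out_putf cleanups — through one done: label that uncorks them, so a single uevent fires once the device is fully set up. A generic userspace sketch of that shape, with illustrative names:]

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool events_suppressed;

static void set_event_suppress(bool on)
{
	events_suppressed = on;
}

static int reconfigure(bool fail)
{
	int error;

	set_event_suppress(true);

	if (fail) {
		error = -EINVAL;
		goto out_err;
	}

	error = 0;
done:
	set_event_suppress(false);	/* runs on success and failure */
	return error;

out_err:
	/* undo partial work here, then fall into the common exit */
	goto done;
}

int main(void)
{
	printf("%d %d\n", reconfigure(false), reconfigure(true));
	return 0;
}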
@@ -762,7 +772,7 @@ static void loop_config_discard(struct loop_device *lo)
struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
- granularity = backingq->limits.discard_granularity ?:
+ granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
queue_physical_block_size(backingq);
/*
@@ -787,14 +797,11 @@ static void loop_config_discard(struct loop_device *lo)
q->limits.discard_granularity = granularity;
blk_queue_max_discard_sectors(q, max_discard_sectors);
blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
} else {
q->limits.discard_granularity = 0;
blk_queue_max_discard_sectors(q, 0);
blk_queue_max_write_zeroes_sectors(q, 0);
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
}
- q->limits.discard_alignment = 0;
}
struct loop_worker {
@@ -808,8 +815,6 @@ struct loop_worker {
};
static void loop_workfn(struct work_struct *work);
-static void loop_rootcg_workfn(struct work_struct *work);
-static void loop_free_idle_workers(struct timer_list *timer);
#ifdef CONFIG_BLK_CGROUP
static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
@@ -893,6 +898,39 @@ queue_work:
spin_unlock_irq(&lo->lo_work_lock);
}
+static void loop_set_timer(struct loop_device *lo)
+{
+ timer_reduce(&lo->timer, jiffies + LOOP_IDLE_WORKER_TIMEOUT);
+}
+
+static void loop_free_idle_workers(struct loop_device *lo, bool delete_all)
+{
+ struct loop_worker *pos, *worker;
+
+ spin_lock_irq(&lo->lo_work_lock);
+ list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
+ idle_list) {
+ if (!delete_all &&
+ time_is_after_jiffies(worker->last_ran_at +
+ LOOP_IDLE_WORKER_TIMEOUT))
+ break;
+ list_del(&worker->idle_list);
+ rb_erase(&worker->rb_node, &lo->worker_tree);
+ css_put(worker->blkcg_css);
+ kfree(worker);
+ }
+ if (!list_empty(&lo->idle_worker_list))
+ loop_set_timer(lo);
+ spin_unlock_irq(&lo->lo_work_lock);
+}
+
+static void loop_free_idle_workers_timer(struct timer_list *timer)
+{
+ struct loop_device *lo = container_of(timer, struct loop_device, timer);
+
+ return loop_free_idle_workers(lo, false);
+}
+
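[Editor's note: loop_free_idle_workers() above merges the timer path and the teardown path into one reaper: walk the idle list oldest-first, free workers idle past the timeout (or everything when delete_all is set), and re-arm the timer only if workers remain. A userspace sketch of that logic, names illustrative:]

#include <stdbool.h>
#include <stdlib.h>
#include <time.h>

#define IDLE_TIMEOUT 60 /* seconds */

struct worker {
	time_t last_ran_at;
	struct worker *next;
};

static struct worker *idle_list;	/* oldest entry first */

static bool reap_idle(bool delete_all)
{
	time_t now = time(NULL);

	while (idle_list) {
		struct worker *w = idle_list;

		if (!delete_all && now < w->last_ran_at + IDLE_TIMEOUT)
			break;		/* everything after is younger */
		idle_list = w->next;
		free(w);
	}
	return idle_list != NULL;	/* caller re-arms the timer */
}

int main(void)
{
	reap_idle(true);
	return 0;
}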
static void loop_update_rotational(struct loop_device *lo)
{
struct file *file = lo->lo_backing_file;
@@ -903,7 +941,7 @@ static void loop_update_rotational(struct loop_device *lo)
/* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
if (file_bdev)
- nonrot = blk_queue_nonrot(bdev_get_queue(file_bdev));
+ nonrot = bdev_nonrot(file_bdev);
if (nonrot)
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
@@ -967,6 +1005,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
+ /* suppress uevents while reconfiguring the device */
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
+
/*
* If we don't hold exclusive handle for the device, upgrade to it
* here to avoid changing device under exclusive owner.
@@ -1011,24 +1052,19 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
!file->f_op->write_iter)
lo->lo_flags |= LO_FLAGS_READ_ONLY;
- lo->workqueue = alloc_workqueue("loop%d",
- WQ_UNBOUND | WQ_FREEZABLE,
- 0,
- lo->lo_number);
if (!lo->workqueue) {
- error = -ENOMEM;
- goto out_unlock;
+ lo->workqueue = alloc_workqueue("loop%d",
+ WQ_UNBOUND | WQ_FREEZABLE,
+ 0, lo->lo_number);
+ if (!lo->workqueue) {
+ error = -ENOMEM;
+ goto out_unlock;
+ }
}
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
- INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn);
- INIT_LIST_HEAD(&lo->rootcg_cmd_list);
- INIT_LIST_HEAD(&lo->idle_worker_list);
- lo->worker_tree = RB_ROOT;
- timer_setup(&lo->timer, loop_free_idle_workers,
- TIMER_DEFERRABLE);
lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
lo->lo_device = bdev;
lo->lo_backing_file = file;
@@ -1073,7 +1109,12 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
loop_reread_partitions(lo);
if (!(mode & FMODE_EXCL))
bd_abort_claiming(bdev, loop_configure);
- return 0;
+
+ error = 0;
+done:
+ /* enable and uncork uevent now that we are done */
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+ return error;
out_unlock:
loop_global_unlock(lo, is_loop);
@@ -1084,53 +1125,24 @@ out_putf:
fput(file);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
- return error;
+ goto done;
}
static void __loop_clr_fd(struct loop_device *lo, bool release)
{
struct file *filp;
gfp_t gfp = lo->old_gfp_mask;
- struct loop_worker *pos, *worker;
-
- /*
- * Flush loop_configure() and loop_change_fd(). It is acceptable for
- * loop_validate_file() to succeed, for actual clear operation has not
- * started yet.
- */
- mutex_lock(&loop_validate_mutex);
- mutex_unlock(&loop_validate_mutex);
- /*
- * loop_validate_file() now fails because l->lo_state != Lo_bound
- * became visible.
- */
-
- /*
- * Since this function is called upon "ioctl(LOOP_CLR_FD)" xor "close()
- * after ioctl(LOOP_CLR_FD)", it is a sign of something going wrong if
- * lo->lo_state has changed while waiting for lo->lo_mutex.
- */
- mutex_lock(&lo->lo_mutex);
- BUG_ON(lo->lo_state != Lo_rundown);
- mutex_unlock(&lo->lo_mutex);
if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
blk_queue_write_cache(lo->lo_queue, false, false);
- /* freeze request queue during the transition */
- blk_mq_freeze_queue(lo->lo_queue);
-
- destroy_workqueue(lo->workqueue);
- spin_lock_irq(&lo->lo_work_lock);
- list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
- idle_list) {
- list_del(&worker->idle_list);
- rb_erase(&worker->rb_node, &lo->worker_tree);
- css_put(worker->blkcg_css);
- kfree(worker);
- }
- spin_unlock_irq(&lo->lo_work_lock);
- del_timer_sync(&lo->timer);
+ /*
+ * Freeze the request queue when unbinding on a live file descriptor and
+ * thus an open device. When called from ->release we are guaranteed
+ * that there is no I/O in progress already.
+ */
+ if (!release)
+ blk_mq_freeze_queue(lo->lo_queue);
spin_lock_irq(&lo->lo_lock);
filp = lo->lo_backing_file;
@@ -1151,7 +1163,8 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
mapping_set_gfp_mask(filp->f_mapping, gfp);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
- blk_mq_unfreeze_queue(lo->lo_queue);
+ if (!release)
+ blk_mq_unfreeze_queue(lo->lo_queue);
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
@@ -1202,11 +1215,20 @@ static int loop_clr_fd(struct loop_device *lo)
{
int err;
- err = mutex_lock_killable(&lo->lo_mutex);
+ /*
+ * Since lo_ioctl() is called without locks held, it is possible that
+ * loop_configure()/loop_change_fd() and loop_clr_fd() run in parallel.
+ *
+ * Therefore, use global lock when setting Lo_rundown state in order to
+ * make sure that loop_validate_file() will fail if the "struct file"
+ * which loop_configure()/loop_change_fd() found via fget() was this
+ * loop device.
+ */
+ err = loop_global_lock_killable(lo, true);
if (err)
return err;
if (lo->lo_state != Lo_bound) {
- mutex_unlock(&lo->lo_mutex);
+ loop_global_unlock(lo, true);
return -ENXIO;
}
/*
@@ -1219,13 +1241,13 @@ static int loop_clr_fd(struct loop_device *lo)
* <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
* command to fail with EBUSY.
*/
- if (atomic_read(&lo->lo_refcnt) > 1) {
+ if (disk_openers(lo->lo_disk) > 1) {
lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
- mutex_unlock(&lo->lo_mutex);
+ loop_global_unlock(lo, true);
return 0;
}
lo->lo_state = Lo_rundown;
- mutex_unlock(&lo->lo_mutex);
+ loop_global_unlock(lo, true);
__loop_clr_fd(lo, false);
return 0;
@@ -1257,15 +1279,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
/* I/O need to be drained during transfer transition */
blk_mq_freeze_queue(lo->lo_queue);
- if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
- /* If any pages were dirtied after invalidate_bdev(), try again */
- err = -EAGAIN;
- pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n",
- __func__, lo->lo_number, lo->lo_file_name,
- lo->lo_device->bd_inode->i_mapping->nrpages);
- goto out_unfreeze;
- }
-
prev_lo_flags = lo->lo_flags;
err = loop_set_status_from_info(lo, info);
@@ -1476,21 +1489,10 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
invalidate_bdev(lo->lo_device);
blk_mq_freeze_queue(lo->lo_queue);
-
- /* invalidate_bdev should have truncated all the pages */
- if (lo->lo_device->bd_inode->i_mapping->nrpages) {
- err = -EAGAIN;
- pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n",
- __func__, lo->lo_number, lo->lo_file_name,
- lo->lo_device->bd_inode->i_mapping->nrpages);
- goto out_unfreeze;
- }
-
blk_queue_logical_block_size(lo->lo_queue, arg);
blk_queue_physical_block_size(lo->lo_queue, arg);
blk_queue_io_min(lo->lo_queue, arg);
loop_update_dio(lo);
-out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue);
return err;
@@ -1720,33 +1722,15 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
}
#endif
-static int lo_open(struct block_device *bdev, fmode_t mode)
-{
- struct loop_device *lo = bdev->bd_disk->private_data;
- int err;
-
- err = mutex_lock_killable(&lo->lo_mutex);
- if (err)
- return err;
- if (lo->lo_state == Lo_deleting)
- err = -ENXIO;
- else
- atomic_inc(&lo->lo_refcnt);
- mutex_unlock(&lo->lo_mutex);
- return err;
-}
-
static void lo_release(struct gendisk *disk, fmode_t mode)
{
struct loop_device *lo = disk->private_data;
- mutex_lock(&lo->lo_mutex);
- if (atomic_dec_return(&lo->lo_refcnt))
- goto out_unlock;
+ if (disk_openers(disk) > 0)
+ return;
- if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
- if (lo->lo_state != Lo_bound)
- goto out_unlock;
+ mutex_lock(&lo->lo_mutex);
+ if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR)) {
lo->lo_state = Lo_rundown;
mutex_unlock(&lo->lo_mutex);
/*
@@ -1755,27 +1739,30 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
*/
__loop_clr_fd(lo, true);
return;
- } else if (lo->lo_state == Lo_bound) {
- /*
- * Otherwise keep thread (if running) and config,
- * but flush possible ongoing bios in thread.
- */
- blk_mq_freeze_queue(lo->lo_queue);
- blk_mq_unfreeze_queue(lo->lo_queue);
}
-
-out_unlock:
mutex_unlock(&lo->lo_mutex);
}
+static void lo_free_disk(struct gendisk *disk)
+{
+ struct loop_device *lo = disk->private_data;
+
+ if (lo->workqueue)
+ destroy_workqueue(lo->workqueue);
+ loop_free_idle_workers(lo, true);
+ del_timer_sync(&lo->timer);
+ mutex_destroy(&lo->lo_mutex);
+ kfree(lo);
+}
+
static const struct block_device_operations lo_fops = {
.owner = THIS_MODULE,
- .open = lo_open,
.release = lo_release,
.ioctl = lo_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = lo_compat_ioctl,
#endif
+ .free_disk = lo_free_disk,
};
/*
@@ -1834,12 +1821,14 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->blkcg_css = NULL;
cmd->memcg_css = NULL;
#ifdef CONFIG_BLK_CGROUP
- if (rq->bio && rq->bio->bi_blkg) {
- cmd->blkcg_css = &bio_blkcg(rq->bio)->css;
+ if (rq->bio) {
+ cmd->blkcg_css = bio_blkcg_css(rq->bio);
#ifdef CONFIG_MEMCG
- cmd->memcg_css =
- cgroup_get_e_css(cmd->blkcg_css->cgroup,
- &memory_cgrp_subsys);
+ if (cmd->blkcg_css) {
+ cmd->memcg_css =
+ cgroup_get_e_css(cmd->blkcg_css->cgroup,
+ &memory_cgrp_subsys);
+ }
#endif
}
#endif
@@ -1888,11 +1877,6 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
}
}
-static void loop_set_timer(struct loop_device *lo)
-{
- timer_reduce(&lo->timer, jiffies + LOOP_IDLE_WORKER_TIMEOUT);
-}
-
static void loop_process_work(struct loop_worker *worker,
struct list_head *cmd_list, struct loop_device *lo)
{
@@ -1941,27 +1925,6 @@ static void loop_rootcg_workfn(struct work_struct *work)
loop_process_work(NULL, &lo->rootcg_cmd_list, lo);
}
-static void loop_free_idle_workers(struct timer_list *timer)
-{
- struct loop_device *lo = container_of(timer, struct loop_device, timer);
- struct loop_worker *pos, *worker;
-
- spin_lock_irq(&lo->lo_work_lock);
- list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
- idle_list) {
- if (time_is_after_jiffies(worker->last_ran_at +
- LOOP_IDLE_WORKER_TIMEOUT))
- break;
- list_del(&worker->idle_list);
- rb_erase(&worker->rb_node, &lo->worker_tree);
- css_put(worker->blkcg_css);
- kfree(worker);
- }
- if (!list_empty(&lo->idle_worker_list))
- loop_set_timer(lo);
- spin_unlock_irq(&lo->lo_work_lock);
-}
-
static const struct blk_mq_ops loop_mq_ops = {
.queue_rq = loop_queue_rq,
.complete = lo_complete_rq,
@@ -1977,6 +1940,9 @@ static int loop_add(int i)
lo = kzalloc(sizeof(*lo), GFP_KERNEL);
if (!lo)
goto out;
+ lo->worker_tree = RB_ROOT;
+ INIT_LIST_HEAD(&lo->idle_worker_list);
+ timer_setup(&lo->timer, loop_free_idle_workers_timer, TIMER_DEFERRABLE);
lo->lo_state = Lo_unbound;
err = mutex_lock_killable(&loop_ctl_mutex);
@@ -2046,11 +2012,12 @@ static int loop_add(int i)
*/
if (!part_shift)
disk->flags |= GENHD_FL_NO_PART;
- atomic_set(&lo->lo_refcnt, 0);
mutex_init(&lo->lo_mutex);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
spin_lock_init(&lo->lo_work_lock);
+ INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn);
+ INIT_LIST_HEAD(&lo->rootcg_cmd_list);
disk->major = LOOP_MAJOR;
disk->first_minor = i << part_shift;
disk->minors = 1 << part_shift;
@@ -2090,15 +2057,14 @@ static void loop_remove(struct loop_device *lo)
{
/* Make this loop device unreachable from pathname. */
del_gendisk(lo->lo_disk);
- blk_cleanup_disk(lo->lo_disk);
+ blk_cleanup_queue(lo->lo_disk->queue);
blk_mq_free_tag_set(&lo->tag_set);
mutex_lock(&loop_ctl_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
mutex_unlock(&loop_ctl_mutex);
- /* There is no route which can find this loop device. */
- mutex_destroy(&lo->lo_mutex);
- kfree(lo);
+
+ put_disk(lo->lo_disk);
}
static void loop_probe(dev_t dev)
@@ -2137,13 +2103,12 @@ static int loop_control_remove(int idx)
ret = mutex_lock_killable(&lo->lo_mutex);
if (ret)
goto mark_visible;
- if (lo->lo_state != Lo_unbound ||
- atomic_read(&lo->lo_refcnt) > 0) {
+ if (lo->lo_state != Lo_unbound || disk_openers(lo->lo_disk) > 0) {
mutex_unlock(&lo->lo_mutex);
ret = -EBUSY;
goto mark_visible;
}
- /* Mark this loop device no longer open()-able. */
+ /* Mark this loop device as no more bound, but not quite unbound yet */
lo->lo_state = Lo_deleting;
mutex_unlock(&lo->lo_mutex);
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
deleted file mode 100644
index 082d4b6bfc6a..000000000000
--- a/drivers/block/loop.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * loop.h
- *
- * Written by Theodore Ts'o, 3/29/93.
- *
- * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
- * permitted under the GNU General Public License.
- */
-#ifndef _LINUX_LOOP_H
-#define _LINUX_LOOP_H
-
-#include <linux/bio.h>
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <uapi/linux/loop.h>
-
-/* Possible states of device */
-enum {
- Lo_unbound,
- Lo_bound,
- Lo_rundown,
- Lo_deleting,
-};
-
-struct loop_func_table;
-
-struct loop_device {
- int lo_number;
- atomic_t lo_refcnt;
- loff_t lo_offset;
- loff_t lo_sizelimit;
- int lo_flags;
- char lo_file_name[LO_NAME_SIZE];
-
- struct file * lo_backing_file;
- struct block_device *lo_device;
-
- gfp_t old_gfp_mask;
-
- spinlock_t lo_lock;
- int lo_state;
- spinlock_t lo_work_lock;
- struct workqueue_struct *workqueue;
- struct work_struct rootcg_work;
- struct list_head rootcg_cmd_list;
- struct list_head idle_worker_list;
- struct rb_root worker_tree;
- struct timer_list timer;
- bool use_dio;
- bool sysfs_inited;
-
- struct request_queue *lo_queue;
- struct blk_mq_tag_set tag_set;
- struct gendisk *lo_disk;
- struct mutex lo_mutex;
- bool idr_visible;
-};
-
-struct loop_cmd {
- struct list_head list_entry;
- bool use_aio; /* use AIO interface to handle I/O */
- atomic_t ref; /* only for aio */
- long ret;
- struct kiocb iocb;
- struct bio_vec *bvec;
- struct cgroup_subsys_state *blkcg_css;
- struct cgroup_subsys_state *memcg_css;
-};
-
-#endif
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 4fbaf0b4958b..27386a572ba4 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2729,7 +2729,7 @@ static int mtip_dma_alloc(struct driver_data *dd)
{
struct mtip_port *port = dd->port;
- /* Allocate dma memory for RX Fis, Identify, and Sector Bufffer */
+ /* Allocate dma memory for RX Fis, Identify, and Sector Buffer */
port->block1 =
dma_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
&port->block1_dma, GFP_KERNEL);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 5a1f98494ddd..ac8b045c777c 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -333,7 +333,6 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
nbd->disk->queue->limits.discard_granularity = blksize;
- nbd->disk->queue->limits.discard_alignment = blksize;
blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
}
blk_queue_logical_block_size(nbd->disk->queue, blksize);
@@ -947,11 +946,15 @@ static int wait_for_reconnect(struct nbd_device *nbd)
struct nbd_config *config = nbd->config;
if (!config->dead_conn_timeout)
return 0;
- if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
+
+ if (!wait_event_timeout(config->conn_wait,
+ test_bit(NBD_RT_DISCONNECTED,
+ &config->runtime_flags) ||
+ atomic_read(&config->live_connections) > 0,
+ config->dead_conn_timeout))
return 0;
- return wait_event_timeout(config->conn_wait,
- atomic_read(&config->live_connections) > 0,
- config->dead_conn_timeout) > 0;
+
+ return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
}
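[Editor's note: the nbd rework above waits on one compound condition — "marked disconnected OR a live connection appeared" — bounded by the dead-connection timeout, then reports success only for a genuine reconnect. A userspace sketch of the same wait using a pthread condition variable (build with -pthread); names are illustrative:]

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t conn_wait = PTHREAD_COND_INITIALIZER;
static bool disconnected;
static int live_connections;

static bool wait_for_reconnect(int timeout_sec)
{
	struct timespec deadline;
	bool ok;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	pthread_mutex_lock(&lock);
	while (!disconnected && live_connections == 0) {
		if (pthread_cond_timedwait(&conn_wait, &lock,
					   &deadline) != 0)
			break;		/* ETIMEDOUT */
	}
	/* Success only if a real connection came back. */
	ok = !disconnected && live_connections > 0;
	pthread_mutex_unlock(&lock);
	return ok;
}

int main(void)
{
	return wait_for_reconnect(1) ? 0 : 1;
}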
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
@@ -1217,11 +1220,11 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
return -ENOSPC;
}
-static void nbd_bdev_reset(struct block_device *bdev)
+static void nbd_bdev_reset(struct nbd_device *nbd)
{
- if (bdev->bd_openers > 1)
+ if (disk_openers(nbd->disk) > 1)
return;
- set_capacity(bdev->bd_disk, 0);
+ set_capacity(nbd->disk, 0);
}
static void nbd_parse_flags(struct nbd_device *nbd)
@@ -1231,8 +1234,6 @@ static void nbd_parse_flags(struct nbd_device *nbd)
set_disk_ro(nbd->disk, true);
else
set_disk_ro(nbd->disk, false);
- if (config->flags & NBD_FLAG_SEND_TRIM)
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
if (config->flags & NBD_FLAG_SEND_FLUSH) {
if (config->flags & NBD_FLAG_SEND_FUA)
blk_queue_write_cache(nbd->disk->queue, true, true);
@@ -1318,9 +1319,7 @@ static void nbd_config_put(struct nbd_device *nbd)
nbd->tag_set.timeout = 0;
nbd->disk->queue->limits.discard_granularity = 0;
- nbd->disk->queue->limits.discard_alignment = 0;
- blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+ blk_queue_max_discard_sectors(nbd->disk->queue, 0);
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
@@ -1389,7 +1388,7 @@ static int nbd_start_device(struct nbd_device *nbd)
return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
}
-static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
+static int nbd_start_device_ioctl(struct nbd_device *nbd)
{
struct nbd_config *config = nbd->config;
int ret;
@@ -1408,7 +1407,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
flush_workqueue(nbd->recv_workq);
mutex_lock(&nbd->config_lock);
- nbd_bdev_reset(bdev);
+ nbd_bdev_reset(nbd);
/* user requested, ignore socket errors */
if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
ret = 0;
@@ -1422,7 +1421,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
{
sock_shutdown(nbd);
__invalidate_device(bdev, true);
- nbd_bdev_reset(bdev);
+ nbd_bdev_reset(nbd);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);
@@ -1468,7 +1467,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
config->flags = arg;
return 0;
case NBD_DO_IT:
- return nbd_start_device_ioctl(nbd, bdev);
+ return nbd_start_device_ioctl(nbd);
case NBD_CLEAR_QUE:
/*
* This is for compatibility only. The queue is always cleared
@@ -1579,7 +1578,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
struct nbd_device *nbd = disk->private_data;
if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
- disk->part0->bd_openers == 0)
+ disk_openers(disk) == 0)
nbd_disconnect_and_put(nbd);
nbd_config_put(nbd);
@@ -1784,7 +1783,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
disk->queue->limits.discard_granularity = 0;
- disk->queue->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(disk->queue, 0);
blk_queue_max_segment_size(disk->queue, UINT_MAX);
blk_queue_max_segments(disk->queue, USHRT_MAX);
@@ -2082,6 +2080,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
mutex_lock(&nbd->config_lock);
nbd_disconnect(nbd);
sock_shutdown(nbd);
+ wake_up(&nbd->config->conn_wait);
/*
* Make sure recv thread has finished, we can safely call nbd_clear_que()
* to cancel the inflight I/Os.
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 05b1120e6623..539cfeac263d 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -11,6 +11,9 @@
#include <linux/init.h>
#include "null_blk.h"
+#undef pr_fmt
+#define pr_fmt(fmt) "null_blk: " fmt
+
#define FREE_BATCH 16
#define TICKS_PER_SEC 50ULL
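[Editor's note: redefining pr_fmt before the pr_*() helpers, as the hunk above does, prefixes every message from this file with "null_blk: " at compile time. A userspace sketch of the convention (the ## __VA_ARGS__ form is the GNU extension the kernel itself relies on):]

#include <stdio.h>

#define pr_fmt(fmt) "null_blk: " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("disk %s created\n", "nullb0");	/* "null_blk: disk nullb0 created" */
	return 0;
}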
@@ -232,6 +235,7 @@ static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
+static struct nullb *null_find_dev_by_name(const char *name);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);
static inline struct nullb_device *to_nullb_device(struct config_item *item)
@@ -560,6 +564,9 @@ config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
struct nullb_device *dev;
+ if (null_find_dev_by_name(name))
+ return ERR_PTR(-EEXIST);
+
dev = null_alloc_dev();
if (!dev)
return ERR_PTR(-ENOMEM);
@@ -1600,7 +1607,7 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
* Only fake timeouts need to execute blk_mq_complete_request() here.
*/
cmd->error = BLK_STS_TIMEOUT;
- if (cmd->fake_timeout)
+ if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
blk_mq_complete_request(rq);
return BLK_EH_DONE;
}
@@ -1765,9 +1772,7 @@ static void null_config_discard(struct nullb *nullb)
}
nullb->q->limits.discard_granularity = nullb->dev->blocksize;
- nullb->q->limits.discard_alignment = nullb->dev->blocksize;
blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}
static const struct block_device_operations null_bio_ops = {
@@ -2061,7 +2066,13 @@ static int null_add_dev(struct nullb_device *dev)
null_config_discard(nullb);
- sprintf(nullb->disk_name, "nullb%d", nullb->index);
+ if (config_item_name(&dev->item)) {
+ /* Use configfs dir name as the device name */
+ snprintf(nullb->disk_name, sizeof(nullb->disk_name),
+ "%s", config_item_name(&dev->item));
+ } else {
+ sprintf(nullb->disk_name, "nullb%d", nullb->index);
+ }
rv = null_gendisk_register(nullb);
if (rv)
@@ -2071,6 +2082,8 @@ static int null_add_dev(struct nullb_device *dev)
list_add_tail(&nullb->list, &nullb_list);
mutex_unlock(&lock);
+ pr_info("disk %s created\n", nullb->disk_name);
+
return 0;
out_cleanup_zone:
null_free_zoned_dev(dev);
@@ -2088,12 +2101,53 @@ out:
return rv;
}
+static struct nullb *null_find_dev_by_name(const char *name)
+{
+ struct nullb *nullb = NULL, *nb;
+
+ mutex_lock(&lock);
+ list_for_each_entry(nb, &nullb_list, list) {
+ if (strcmp(nb->disk_name, name) == 0) {
+ nullb = nb;
+ break;
+ }
+ }
+ mutex_unlock(&lock);
+
+ return nullb;
+}
+
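[Editor's note: null_find_dev_by_name() above is a linear scan of the global device list under the lock, used to reject duplicate configfs names with -EEXIST. A userspace sketch of the lookup, with illustrative types:]

#include <pthread.h>
#include <stddef.h>
#include <string.h>

struct nullb {
	char disk_name[32];
	struct nullb *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct nullb *nullb_list;

static struct nullb *find_dev_by_name(const char *name)
{
	struct nullb *found = NULL, *nb;

	pthread_mutex_lock(&lock);
	for (nb = nullb_list; nb; nb = nb->next) {
		if (strcmp(nb->disk_name, name) == 0) {
			found = nb;
			break;
		}
	}
	pthread_mutex_unlock(&lock);
	return found;
}

int main(void)
{
	return find_dev_by_name("nullb0") == NULL ? 0 : 1;
}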
+static int null_create_dev(void)
+{
+ struct nullb_device *dev;
+ int ret;
+
+ dev = null_alloc_dev();
+ if (!dev)
+ return -ENOMEM;
+
+ ret = null_add_dev(dev);
+ if (ret) {
+ null_free_dev(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void null_destroy_dev(struct nullb *nullb)
+{
+ struct nullb_device *dev = nullb->dev;
+
+ null_del_dev(nullb);
+ null_free_dev(dev);
+}
+
static int __init null_init(void)
{
int ret = 0;
unsigned int i;
struct nullb *nullb;
- struct nullb_device *dev;
if (g_bs > PAGE_SIZE) {
pr_warn("invalid block size\n");
@@ -2113,19 +2167,21 @@ static int __init null_init(void)
}
if (g_queue_mode == NULL_Q_RQ) {
- pr_err("legacy IO path no longer available\n");
+ pr_err("legacy IO path is no longer available\n");
return -EINVAL;
}
+
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("submit_queues param is set to %u.\n",
- nr_online_nodes);
+ nr_online_nodes);
g_submit_queues = nr_online_nodes;
}
- } else if (g_submit_queues > nr_cpu_ids)
+ } else if (g_submit_queues > nr_cpu_ids) {
g_submit_queues = nr_cpu_ids;
- else if (g_submit_queues <= 0)
+ } else if (g_submit_queues <= 0) {
g_submit_queues = 1;
+ }
if (g_queue_mode == NULL_Q_MQ && shared_tags) {
ret = null_init_tag_set(NULL, &tag_set);
@@ -2149,16 +2205,9 @@ static int __init null_init(void)
}
for (i = 0; i < nr_devices; i++) {
- dev = null_alloc_dev();
- if (!dev) {
- ret = -ENOMEM;
- goto err_dev;
- }
- ret = null_add_dev(dev);
- if (ret) {
- null_free_dev(dev);
+ ret = null_create_dev();
+ if (ret)
goto err_dev;
- }
}
pr_info("module loaded\n");
@@ -2167,9 +2216,7 @@ static int __init null_init(void)
err_dev:
while (!list_empty(&nullb_list)) {
nullb = list_entry(nullb_list.next, struct nullb, list);
- dev = nullb->dev;
- null_del_dev(nullb);
- null_free_dev(dev);
+ null_destroy_dev(nullb);
}
unregister_blkdev(null_major, "nullb");
err_conf:
@@ -2190,12 +2237,8 @@ static void __exit null_exit(void)
mutex_lock(&lock);
while (!list_empty(&nullb_list)) {
- struct nullb_device *dev;
-
nullb = list_entry(nullb_list.next, struct nullb, list);
- dev = nullb->dev;
- null_del_dev(nullb);
- null_free_dev(dev);
+ null_destroy_dev(nullb);
}
mutex_unlock(&lock);
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 78eb56b0ca55..4525a65e1b23 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -16,13 +16,15 @@
#include <linux/mutex.h>
struct nullb_cmd {
- struct request *rq;
- struct bio *bio;
+ union {
+ struct request *rq;
+ struct bio *bio;
+ };
unsigned int tag;
blk_status_t error;
+ bool fake_timeout;
struct nullb_queue *nq;
struct hrtimer timer;
- bool fake_timeout;
};
struct nullb_queue {
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index dae54dd1aeac..ed158ea4fdd1 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -6,6 +6,9 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
+#undef pr_fmt
+#define pr_fmt(fmt) "null_blk: " fmt
+
static inline sector_t mb_to_sects(unsigned long mb)
{
return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
@@ -75,8 +78,8 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
dev->zone_capacity = dev->zone_size;
if (dev->zone_capacity > dev->zone_size) {
- pr_err("null_blk: zone capacity (%lu MB) larger than zone size (%lu MB)\n",
- dev->zone_capacity, dev->zone_size);
+ pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
+ dev->zone_capacity, dev->zone_size);
return -EINVAL;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 86c8794ede41..789093375344 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -12,7 +12,7 @@
* Theory of operation:
*
* At the lowest level, there is the standard driver for the CD/DVD device,
- * typically ide-cd.c or sr.c. This driver can handle read and write requests,
+ * such as drivers/scsi/sr.c. This driver can handle read and write requests,
* but it doesn't know anything about the special restrictions that apply to
* packet writing. One restriction is that write requests must be aligned to
* packet boundaries on the physical media, and the size of a write request
@@ -522,7 +522,7 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
goto no_pkt;
pkt->frames = frames;
- pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
+ pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
if (!pkt->w_bio)
goto no_bio;
@@ -536,27 +536,21 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
bio_list_init(&pkt->orig_bios);
for (i = 0; i < frames; i++) {
- struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
- if (!bio)
+ pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
+ if (!pkt->r_bios[i])
goto no_rd_bio;
-
- pkt->r_bios[i] = bio;
}
return pkt;
no_rd_bio:
- for (i = 0; i < frames; i++) {
- struct bio *bio = pkt->r_bios[i];
- if (bio)
- bio_put(bio);
- }
-
+ for (i = 0; i < frames; i++)
+ kfree(pkt->r_bios[i]);
no_page:
for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
if (pkt->pages[i])
__free_page(pkt->pages[i]);
- bio_put(pkt->w_bio);
+ kfree(pkt->w_bio);
no_bio:
kfree(pkt);
no_pkt:
@@ -570,14 +564,11 @@ static void pkt_free_packet_data(struct packet_data *pkt)
{
int i;
- for (i = 0; i < pkt->frames; i++) {
- struct bio *bio = pkt->r_bios[i];
- if (bio)
- bio_put(bio);
- }
+ for (i = 0; i < pkt->frames; i++)
+ kfree(pkt->r_bios[i]);
for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
__free_page(pkt->pages[i]);
- bio_put(pkt->w_bio);
+ kfree(pkt->w_bio);
kfree(pkt);
}
@@ -951,6 +942,7 @@ static void pkt_end_io_read(struct bio *bio)
if (bio->bi_status)
atomic_inc(&pkt->io_errors);
+ bio_uninit(bio);
if (atomic_dec_and_test(&pkt->io_wait)) {
atomic_inc(&pkt->run_sm);
wake_up(&pd->wqueue);
@@ -968,6 +960,7 @@ static void pkt_end_io_packet_write(struct bio *bio)
pd->stats.pkt_ended++;
+ bio_uninit(bio);
pkt_bio_finished(pd);
atomic_dec(&pkt->io_wait);
atomic_inc(&pkt->run_sm);
@@ -1022,7 +1015,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
continue;
bio = pkt->r_bios[f];
- bio_reset(bio, pd->bdev, REQ_OP_READ);
+ bio_init(bio, pd->bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
@@ -1235,7 +1228,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
int f;
- bio_reset(pkt->w_bio, pd->bdev, REQ_OP_WRITE);
+ bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames,
+ REQ_OP_WRITE);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
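[Editor's note: the pktcdvd conversion above follows the reworked bio_kmalloc() contract — allocate the bio once, bio_init() it before each use, bio_uninit() in the completion handler, and kfree() at teardown, instead of bio_reset()/bio_put(). A loose userspace analogy of that allocate-once/init-per-use lifecycle; these names are illustrative, not the block-layer API:]

#include <stdlib.h>
#include <string.h>

struct io {
	int op;
	char payload[64];
};

static struct io *io_alloc(void)		/* once, at setup */
{
	return malloc(sizeof(struct io));
}

static void io_init(struct io *io, int op)	/* before every use */
{
	memset(io, 0, sizeof(*io));
	io->op = op;
}

static void io_uninit(struct io *io)		/* in the completion path */
{
	io->op = -1;
}

int main(void)
{
	struct io *io = io_alloc();

	if (!io)
		return 1;
	for (int op = 0; op < 3; op++) {
		io_init(io, op);
		io_uninit(io);
	}
	free(io);				/* once, at teardown */
	return 0;
}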
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b844432bad20..2b21f717cce1 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4942,7 +4942,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
if (rbd_dev->opts->trim) {
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
q->limits.discard_granularity = rbd_dev->opts->alloc_size;
blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index b66e8840b94b..409c76b81aed 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -25,6 +25,7 @@ static int rnbd_client_major;
static DEFINE_IDA(index_ida);
static DEFINE_MUTEX(sess_lock);
static LIST_HEAD(sess_list);
+static struct workqueue_struct *rnbd_clt_wq;
/*
* Maximum number of partitions an instance can have.
@@ -1364,11 +1365,9 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
blk_queue_max_discard_sectors(dev->queue, dev->max_discard_sectors);
dev->queue->limits.discard_granularity = dev->discard_granularity;
dev->queue->limits.discard_alignment = dev->discard_alignment;
- if (dev->max_discard_sectors)
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->queue);
if (dev->secure_discard)
- blk_queue_flag_set(QUEUE_FLAG_SECERASE, dev->queue);
-
+ blk_queue_max_secure_erase_sectors(dev->queue,
+ dev->max_discard_sectors);
blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
blk_queue_max_segments(dev->queue, dev->max_segments);
@@ -1761,12 +1760,12 @@ static void rnbd_destroy_sessions(void)
* procedure takes minutes.
*/
INIT_WORK(&dev->unmap_on_rmmod_work, unmap_device_work);
- queue_work(system_long_wq, &dev->unmap_on_rmmod_work);
+ queue_work(rnbd_clt_wq, &dev->unmap_on_rmmod_work);
}
rnbd_clt_put_sess(sess);
}
/* Wait for all scheduled unmap works */
- flush_workqueue(system_long_wq);
+ flush_workqueue(rnbd_clt_wq);
WARN_ON(!list_empty(&sess_list));
}
@@ -1791,6 +1790,14 @@ static int __init rnbd_client_init(void)
pr_err("Failed to load module, creating sysfs device files failed, err: %d\n",
err);
unregister_blkdev(rnbd_client_major, "rnbd");
+ return err;
+ }
+ rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", 0, 0);
+ if (!rnbd_clt_wq) {
+ pr_err("Failed to load module, alloc_workqueue failed.\n");
+ rnbd_clt_destroy_sysfs_files();
+ unregister_blkdev(rnbd_client_major, "rnbd");
+ err = -ENOMEM;
}
return err;
@@ -1801,6 +1808,7 @@ static void __exit rnbd_client_exit(void)
rnbd_destroy_sessions();
unregister_blkdev(rnbd_client_major, "rnbd");
ida_destroy(&index_ida);
+ destroy_workqueue(rnbd_clt_wq);
}
module_init(rnbd_client_init);
diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
index 2c3df02b5e8e..4309e5252469 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.h
+++ b/drivers/block/rnbd/rnbd-srv-dev.h
@@ -44,16 +44,12 @@ static inline int rnbd_dev_get_max_hw_sects(const struct rnbd_dev *dev)
static inline int rnbd_dev_get_secure_discard(const struct rnbd_dev *dev)
{
- return blk_queue_secure_erase(bdev_get_queue(dev->bdev));
+ return bdev_max_secure_erase_sectors(dev->bdev);
}
static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev)
{
- if (!blk_queue_discard(bdev_get_queue(dev->bdev)))
- return 0;
-
- return blk_queue_get_max_sectors(bdev_get_queue(dev->bdev),
- REQ_OP_DISCARD);
+ return bdev_max_discard_sectors(dev->bdev);
}
static inline int rnbd_dev_get_discard_granularity(const struct rnbd_dev *dev)
@@ -63,7 +59,7 @@ static inline int rnbd_dev_get_discard_granularity(const struct rnbd_dev *dev)
static inline int rnbd_dev_get_discard_alignment(const struct rnbd_dev *dev)
{
- return bdev_get_queue(dev->bdev)->limits.discard_alignment;
+ return bdev_discard_alignment(dev->bdev);
}
#endif /* RNBD_SRV_DEV_H */
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index f04df6294650..beaef43a67b9 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -533,7 +533,6 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
struct rnbd_srv_sess_dev *sess_dev)
{
struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev;
- struct request_queue *q = bdev_get_queue(rnbd_dev->bdev);
rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
rsp->device_id =
@@ -558,9 +557,9 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
rsp->secure_discard =
cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
rsp->cache_policy = 0;
- if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+ if (bdev_write_cache(rnbd_dev->bdev))
rsp->cache_policy |= RNBD_WRITEBACK;
- if (blk_queue_fua(q))
+ if (bdev_fua(rnbd_dev->bdev))
rsp->cache_policy |= RNBD_FUA;
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index a8bcf3f664af..d624cc8eddc3 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -867,11 +867,12 @@ static int virtblk_probe(struct virtio_device *vdev)
blk_queue_io_opt(q, blk_size * opt_io_size);
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
- q->limits.discard_granularity = blk_size;
-
virtio_cread(vdev, struct virtio_blk_config,
discard_sector_alignment, &v);
- q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;
+ if (v)
+ q->limits.discard_granularity = v << SECTOR_SHIFT;
+ else
+ q->limits.discard_granularity = blk_size;
virtio_cread(vdev, struct virtio_blk_config,
max_discard_sectors, &v);
@@ -888,8 +889,6 @@ static int virtblk_probe(struct virtio_device *vdev)
v = sg_elems;
blk_queue_max_discard_segments(q,
min(v, MAX_DISCARD_SEGMENTS));
-
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}
if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index de42458195bc..a97f2bf5b01b 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -970,7 +970,6 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
int status = BLKIF_RSP_OKAY;
struct xen_blkif *blkif = ring->blkif;
struct block_device *bdev = blkif->vbd.bdev;
- unsigned long secure;
struct phys_req preq;
xen_blkif_get(blkif);
@@ -987,13 +986,15 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
}
ring->st_ds_req++;
- secure = (blkif->vbd.discard_secure &&
- (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
- BLKDEV_DISCARD_SECURE : 0;
+ if (blkif->vbd.discard_secure &&
+ (req->u.discard.flag & BLKIF_DISCARD_SECURE))
+ err = blkdev_issue_secure_erase(bdev,
+ req->u.discard.sector_number,
+ req->u.discard.nr_sectors, GFP_KERNEL);
+ else
+ err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
+ req->u.discard.nr_sectors, GFP_KERNEL);
- err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
- req->u.discard.nr_sectors,
- GFP_KERNEL, secure);
fail_response:
if (err == -EOPNOTSUPP) {
pr_debug("discard op failed, not supported\n");
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index f09040435e2e..97de13b14175 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -484,7 +484,6 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
{
struct xen_vbd *vbd;
struct block_device *bdev;
- struct request_queue *q;
vbd = &blkif->vbd;
vbd->handle = handle;
@@ -516,11 +515,9 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
vbd->type |= VDISK_REMOVABLE;
- q = bdev_get_queue(bdev);
- if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+ if (bdev_write_cache(bdev))
vbd->flush_support = true;
-
- if (q && blk_queue_secure_erase(q))
+ if (bdev_max_secure_erase_sectors(bdev))
vbd->discard_secure = true;
vbd->feature_gnt_persistent = feature_persistent;
@@ -578,22 +575,21 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
int err;
int state = 0;
struct block_device *bdev = be->blkif->vbd.bdev;
- struct request_queue *q = bdev_get_queue(bdev);
if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
return;
- if (blk_queue_discard(q)) {
+ if (bdev_max_discard_sectors(bdev)) {
err = xenbus_printf(xbt, dev->nodename,
"discard-granularity", "%u",
- q->limits.discard_granularity);
+ bdev_discard_granularity(bdev));
if (err) {
dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
return;
}
err = xenbus_printf(xbt, dev->nodename,
"discard-alignment", "%u",
- q->limits.discard_alignment);
+ bdev_discard_alignment(bdev));
if (err) {
dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
return;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 003056d4f7f5..0f3f5238f7bc 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -944,13 +944,13 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
if (info->feature_discard) {
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
blk_queue_max_discard_sectors(rq, get_capacity(gd));
rq->limits.discard_granularity = info->discard_granularity ?:
info->physical_sector_size;
rq->limits.discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
- blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
+ blk_queue_max_secure_erase_sectors(rq,
+ get_capacity(gd));
}
/* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -1606,8 +1606,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
blkif_req(req)->error = BLK_STS_NOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
- blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
+ blk_queue_max_discard_sectors(rq, 0);
+ blk_queue_max_secure_erase_sectors(rq, 0);
}
break;
case BLKIF_OP_FLUSH_DISKCACHE:
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e9474b02012d..6853dd3c7d3a 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1675,9 +1675,10 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
- start_time = disk_start_io_acct(bdev->bd_disk, SECTORS_PER_PAGE, op);
+ start_time = bdev_start_io_acct(bdev->bd_disk->part0,
+ SECTORS_PER_PAGE, op, jiffies);
ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
- disk_end_io_acct(bdev->bd_disk, op, start_time);
+ bdev_end_io_acct(bdev->bd_disk->part0, op, start_time);
out:
/*
* If I/O fails, just return error(ie, non-zero) without
@@ -1786,7 +1787,7 @@ static ssize_t reset_store(struct device *dev,
int ret;
unsigned short do_reset;
struct zram *zram;
- struct block_device *bdev;
+ struct gendisk *disk;
ret = kstrtou16(buf, 10, &do_reset);
if (ret)
@@ -1796,26 +1797,26 @@ static ssize_t reset_store(struct device *dev,
return -EINVAL;
zram = dev_to_zram(dev);
- bdev = zram->disk->part0;
+ disk = zram->disk;
- mutex_lock(&bdev->bd_disk->open_mutex);
+ mutex_lock(&disk->open_mutex);
/* Do not reset an active device or claimed device */
- if (bdev->bd_openers || zram->claim) {
- mutex_unlock(&bdev->bd_disk->open_mutex);
+ if (disk_openers(disk) || zram->claim) {
+ mutex_unlock(&disk->open_mutex);
return -EBUSY;
}
/* From now on, anyone can't open /dev/zram[0-9] */
zram->claim = true;
- mutex_unlock(&bdev->bd_disk->open_mutex);
+ mutex_unlock(&disk->open_mutex);
/* Make sure all the pending I/O are finished */
- sync_blockdev(bdev);
+ sync_blockdev(disk->part0);
zram_reset_device(zram);
- mutex_lock(&bdev->bd_disk->open_mutex);
+ mutex_lock(&disk->open_mutex);
zram->claim = false;
- mutex_unlock(&bdev->bd_disk->open_mutex);
+ mutex_unlock(&disk->open_mutex);
return len;
}
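[Editor's note: zram's reset_store() above uses a claim flag under the disk's open_mutex — refuse reset while there are openers, set the claim so new opens fail, reset outside the lock, then drop the claim. A userspace sketch of that handshake with a pthread mutex; names are illustrative:]

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t open_mutex = PTHREAD_MUTEX_INITIALIZER;
static int openers;
static bool claimed;

static int dev_open(void)
{
	int ret = 0;

	pthread_mutex_lock(&open_mutex);
	if (claimed)
		ret = -EBUSY;		/* reset in progress */
	else
		openers++;
	pthread_mutex_unlock(&open_mutex);
	return ret;
}

static int dev_reset(void)
{
	pthread_mutex_lock(&open_mutex);
	if (openers || claimed) {
		pthread_mutex_unlock(&open_mutex);
		return -EBUSY;
	}
	claimed = true;			/* new opens now fail */
	pthread_mutex_unlock(&open_mutex);

	/* ... flush pending I/O and reset device state here ... */

	pthread_mutex_lock(&open_mutex);
	claimed = false;
	pthread_mutex_unlock(&open_mutex);
	return 0;
}

int main(void)
{
	return dev_reset() || dev_open() ? 1 : 0;
}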
@@ -1952,7 +1953,6 @@ static int zram_add(void)
blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);
/*
* zram_bio_discard() will clear all logical blocks if logical block
@@ -1987,19 +1987,18 @@ out_free_dev:
static int zram_remove(struct zram *zram)
{
- struct block_device *bdev = zram->disk->part0;
bool claimed;
- mutex_lock(&bdev->bd_disk->open_mutex);
- if (bdev->bd_openers) {
- mutex_unlock(&bdev->bd_disk->open_mutex);
+ mutex_lock(&zram->disk->open_mutex);
+ if (disk_openers(zram->disk)) {
+ mutex_unlock(&zram->disk->open_mutex);
return -EBUSY;
}
claimed = zram->claim;
if (!claimed)
zram->claim = true;
- mutex_unlock(&bdev->bd_disk->open_mutex);
+ mutex_unlock(&zram->disk->open_mutex);
zram_debugfs_unregister(zram);
@@ -2011,7 +2010,7 @@ static int zram_remove(struct zram *zram)
;
} else {
/* Make sure all the pending I/O are finished */
- sync_blockdev(bdev);
+ sync_blockdev(zram->disk->part0);
zram_reset_device(zram);
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c
index 5e0e4393ce4d..0cfe859a4ac4 100644
--- a/drivers/bus/fsl-mc/fsl-mc-msi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
@@ -224,8 +224,12 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count)
if (error)
return error;
+ msi_lock_descs(dev);
if (msi_first_desc(dev, MSI_DESC_ALL))
- return -EINVAL;
+ error = -EINVAL;
+ msi_unlock_descs(dev);
+ if (error)
+ return error;
/*
* NOTE: Calling this function will trigger the invocation of the
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 60fbd42041dd..828c66bbaa67 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -352,8 +352,7 @@ static int of_weim_notify(struct notifier_block *nb, unsigned long action,
pdev = of_find_device_by_node(rd->dn);
if (!pdev) {
- dev_err(&pdev->dev,
- "Could not find platform device for '%pOF'\n",
+ pr_err("Could not find platform device for '%pOF'\n",
rd->dn);
ret = notifier_from_errno(-EINVAL);
@@ -370,7 +369,7 @@ static int of_weim_notify(struct notifier_block *nb, unsigned long action,
return ret;
}
-struct notifier_block weim_of_notifier = {
+static struct notifier_block weim_of_notifier = {
.notifier_call = of_weim_notify,
};
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 9527b7d63840..541ced27d941 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -1060,6 +1060,7 @@ static int __maybe_unused mhi_pci_freeze(struct device *dev)
* the intermediate restore kernel reinitializes MHI device with new
* context.
*/
+ flush_work(&mhi_pdev->recovery_work);
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
mhi_power_down(mhi_cntrl, true);
mhi_unprepare_after_power_down(mhi_cntrl);
@@ -1085,6 +1086,7 @@ static const struct dev_pm_ops mhi_pci_pm_ops = {
.resume = mhi_pci_resume,
.freeze = mhi_pci_freeze,
.thaw = mhi_pci_restore,
+ .poweroff = mhi_pci_freeze,
.restore = mhi_pci_restore,
#endif
};
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 4566e730ef2b..60b082fe2ed0 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -227,6 +227,8 @@ static struct sunxi_rsb_device *sunxi_rsb_device_create(struct sunxi_rsb *rsb,
dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev));
+ return rdev;
+
err_device_add:
put_device(&rdev->dev);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 54c0ee6dda30..7a1b1f9e4933 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -3232,13 +3232,27 @@ static int sysc_check_disabled_devices(struct sysc *ddata)
*/
static int sysc_check_active_timer(struct sysc *ddata)
{
+ int error;
+
if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
ddata->cap->type != TI_SYSC_OMAP4_TIMER)
return 0;
+ /*
+ * Quirk for omap3 beagleboard revision A to B4 to use gpt12.
+ * Revision C and later are fixed with commit 23885389dbbb ("ARM:
+ * dts: Fix timer regression for beagleboard revision c"). This all
+ * can be dropped if we stop supporting old beagleboard revisions
+ * A to B4 at some point.
+ */
+ if (sysc_soc->soc == SOC_3430)
+ error = -ENXIO;
+ else
+ error = -EBUSY;
+
if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
(ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
- return -ENXIO;
+ return error;
return 0;
}
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 7bd10d63ddbe..416f723a2dbb 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -14,15 +14,6 @@
actually talk to the hardware. Suggestions are welcome.
Patches that work are more welcome though. ;-)
- To Do List:
- ----------------------------------
-
- -- Modify sysctl/proc interface. I plan on having one directory per
- drive, with entries for outputing general drive information, and sysctl
- based tunable parameters such as whether the tray should auto-close for
- that drive. Suggestions (or patches) for this welcome!
-
-
Revision History
----------------------------------
1.00 Date Unknown -- David van Leeuwen <david@tm.tno.nl>
@@ -648,6 +639,7 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
mutex_unlock(&cdrom_mutex);
return 0;
}
+EXPORT_SYMBOL(register_cdrom);
#undef ENSURE
void unregister_cdrom(struct cdrom_device_info *cdi)
@@ -663,6 +655,7 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
}
+EXPORT_SYMBOL(unregister_cdrom);
int cdrom_get_media_event(struct cdrom_device_info *cdi,
struct media_event_desc *med)
@@ -690,6 +683,7 @@ int cdrom_get_media_event(struct cdrom_device_info *cdi,
memcpy(med, &buffer[sizeof(*eh)], sizeof(*med));
return 0;
}
+EXPORT_SYMBOL(cdrom_get_media_event);
static int cdrom_get_random_writable(struct cdrom_device_info *cdi,
struct rwrt_feature_desc *rfd)
@@ -1206,6 +1200,7 @@ err:
cdi->use_count--;
return ret;
}
+EXPORT_SYMBOL(cdrom_open);
/* This code is similar to that in open_for_data. The routine is called
whenever an audio play operation is requested.
@@ -1301,6 +1296,7 @@ void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
cdo->tray_move(cdi, 1);
}
}
+EXPORT_SYMBOL(cdrom_release);
static int cdrom_read_mech_status(struct cdrom_device_info *cdi,
struct cdrom_changer_info *buf)
@@ -1365,7 +1361,6 @@ out_free:
*/
int cdrom_number_of_slots(struct cdrom_device_info *cdi)
{
- int status;
int nslots = 1;
struct cdrom_changer_info *info;
@@ -1377,12 +1372,13 @@ int cdrom_number_of_slots(struct cdrom_device_info *cdi)
if (!info)
return -ENOMEM;
- if ((status = cdrom_read_mech_status(cdi, info)) == 0)
+ if (cdrom_read_mech_status(cdi, info) == 0)
nslots = info->hdr.nslots;
kfree(info);
return nslots;
}
+EXPORT_SYMBOL(cdrom_number_of_slots);
/* If SLOT < 0, unload the current slot. Otherwise, try to load SLOT. */
@@ -1582,6 +1578,7 @@ void init_cdrom_command(struct packet_command *cgc, void *buf, int len,
cgc->data_direction = type;
cgc->timeout = CDROM_DEF_TIMEOUT;
}
+EXPORT_SYMBOL(init_cdrom_command);
/* DVD handling */
@@ -2000,6 +1997,7 @@ int cdrom_mode_sense(struct cdrom_device_info *cdi,
cgc->data_direction = CGC_DATA_READ;
return cdo->generic_packet(cdi, cgc);
}
+EXPORT_SYMBOL(cdrom_mode_sense);
int cdrom_mode_select(struct cdrom_device_info *cdi,
struct packet_command *cgc)
@@ -2015,6 +2013,7 @@ int cdrom_mode_select(struct cdrom_device_info *cdi,
cgc->data_direction = CGC_DATA_WRITE;
return cdo->generic_packet(cdi, cgc);
}
+EXPORT_SYMBOL(cdrom_mode_select);
static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
struct cdrom_subchnl *subchnl, int mcn)
@@ -2444,14 +2443,6 @@ static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
return -EINVAL;
}
- /*
- * ->select_disc is a hook to allow a driver-specific way of
- * seleting disc. However, since there is no equivalent hook for
- * cdrom_slot_status this may not actually be useful...
- */
- if (cdi->ops->select_disc)
- return cdi->ops->select_disc(cdi, arg);
-
cd_dbg(CD_CHANGER, "Using generic cdrom_select_disc()\n");
return cdrom_select_disc(cdi, arg);
}
@@ -2893,6 +2884,7 @@ use_toc:
*last_written = toc.cdte_addr.lba;
return 0;
}
+EXPORT_SYMBOL(cdrom_get_last_written);
/* return the next writable block. also for udf file system. */
static int cdrom_get_next_writable(struct cdrom_device_info *cdi,
@@ -3430,18 +3422,7 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
return -ENOSYS;
}
-
-EXPORT_SYMBOL(cdrom_get_last_written);
-EXPORT_SYMBOL(register_cdrom);
-EXPORT_SYMBOL(unregister_cdrom);
-EXPORT_SYMBOL(cdrom_open);
-EXPORT_SYMBOL(cdrom_release);
EXPORT_SYMBOL(cdrom_ioctl);
-EXPORT_SYMBOL(cdrom_number_of_slots);
-EXPORT_SYMBOL(cdrom_mode_select);
-EXPORT_SYMBOL(cdrom_mode_sense);
-EXPORT_SYMBOL(init_cdrom_command);
-EXPORT_SYMBOL(cdrom_get_media_event);
#ifdef CONFIG_SYSCTL
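
The cdrom.c changes above are mostly mechanical: dead code is dropped, and each EXPORT_SYMBOL() moves from a block at the end of the file to directly below the function it exports, which is the usual kernel convention. Illustrative shape, with a hypothetical function:

int my_api_call(void)
{
	return 0;
}
EXPORT_SYMBOL(my_api_call);
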
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c59265146e9c..f1827257ef0e 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3677,8 +3677,11 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
void ipmi_unregister_smi(struct ipmi_smi *intf)
{
struct ipmi_smi_watcher *w;
- int intf_num = intf->intf_num, index;
+ int intf_num, index;
+ if (!intf)
+ return;
+ intf_num = intf->intf_num;
mutex_lock(&ipmi_interfaces_mutex);
intf->intf_num = -1;
intf->in_shutdown = true;
@@ -4518,6 +4521,8 @@ return_unspecified:
} else
/* The message was sent, start the timer. */
intf_start_seq_timer(intf, msg->msgid);
+ requeue = 0;
+ goto out;
} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
|| (msg->rsp[1] != msg->data[1])) {
/*
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 64dedb3ef8ec..5604a810fb3d 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2220,10 +2220,7 @@ static void cleanup_one_si(struct smi_info *smi_info)
return;
list_del(&smi_info->link);
-
- if (smi_info->intf)
- ipmi_unregister_smi(smi_info->intf);
-
+ ipmi_unregister_smi(smi_info->intf);
kfree(smi_info);
}
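
Both ipmi hunks implement one convention: the teardown helper tolerates a NULL argument so every caller can drop its own check. A sketch with hypothetical names:

void my_unregister(struct my_intf *intf)
{
	if (!intf)
		return;		/* callers may pass NULL unconditionally */

	list_del(&intf->link);
	kfree(intf);
}
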
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 1d8242969751..4c9adb4f3d5d 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -318,6 +318,13 @@ static void crng_reseed(bool force)
* the resultant ChaCha state to the user, along with the second
* half of the block containing 32 bytes of random data that may
* be used; random_data_len may not be greater than 32.
+ *
+ * The returned ChaCha state contains within it a copy of the old
+ * key value, at index 4, so the state should always be zeroed out
+ * immediately after use in order to maintain forward secrecy.
+ * If the state cannot be erased in a timely manner, then it is
+ * safer to set the random_data parameter to &chacha_state[4] so
+ * that this function overwrites it before returning.
*/
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
u32 chacha_state[CHACHA_STATE_WORDS],
@@ -437,11 +444,8 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
* This shouldn't be set by functions like add_device_randomness(),
* where we can't trust the buffer passed to it is guaranteed to be
* unpredictable (so it might not have any entropy at all).
- *
- * Returns the number of bytes processed from input, which is bounded
- * by CRNG_INIT_CNT_THRESH if account is true.
*/
-static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
+static void crng_pre_init_inject(const void *input, size_t len, bool account)
{
static int crng_init_cnt = 0;
struct blake2s_state hash;
@@ -452,18 +456,15 @@ static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
spin_lock_irqsave(&base_crng.lock, flags);
if (crng_init != 0) {
spin_unlock_irqrestore(&base_crng.lock, flags);
- return 0;
+ return;
}
- if (account)
- len = min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
-
blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
blake2s_update(&hash, input, len);
blake2s_final(&hash, base_crng.key);
if (account) {
- crng_init_cnt += len;
+ crng_init_cnt += min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
++base_crng.generation;
crng_init = 1;
@@ -474,8 +475,6 @@ static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
if (crng_init == 1)
pr_notice("fast init done\n");
-
- return len;
}
static void _get_random_bytes(void *buf, size_t nbytes)
@@ -531,49 +530,59 @@ EXPORT_SYMBOL(get_random_bytes);
static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
- bool large_request = nbytes > 256;
- ssize_t ret = 0;
- size_t len;
+ size_t len, left, ret = 0;
u32 chacha_state[CHACHA_STATE_WORDS];
u8 output[CHACHA_BLOCK_SIZE];
if (!nbytes)
return 0;
- len = min_t(size_t, 32, nbytes);
- crng_make_state(chacha_state, output, len);
-
- if (copy_to_user(buf, output, len))
- return -EFAULT;
- nbytes -= len;
- buf += len;
- ret += len;
-
- while (nbytes) {
- if (large_request && need_resched()) {
- if (signal_pending(current))
- break;
- schedule();
- }
+ /*
+ * Immediately overwrite the ChaCha key at index 4 with random
+ * bytes, in case userspace causes copy_to_user() below to sleep
+ * forever, so that we still retain forward secrecy in that case.
+ */
+ crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+ /*
+ * However, if we're doing a read of len <= 32, we don't need to
+ * use chacha_state after, so we can simply return those bytes to
+ * the user directly.
+ */
+ if (nbytes <= CHACHA_KEY_SIZE) {
+ ret = nbytes - copy_to_user(buf, &chacha_state[4], nbytes);
+ goto out_zero_chacha;
+ }
+ for (;;) {
chacha20_block(chacha_state, output);
if (unlikely(chacha_state[12] == 0))
++chacha_state[13];
len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
- if (copy_to_user(buf, output, len)) {
- ret = -EFAULT;
+ left = copy_to_user(buf, output, len);
+ if (left) {
+ ret += len - left;
break;
}
- nbytes -= len;
buf += len;
ret += len;
+ nbytes -= len;
+ if (!nbytes)
+ break;
+
+ BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
+ if (ret % PAGE_SIZE == 0) {
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
}
- memzero_explicit(chacha_state, sizeof(chacha_state));
memzero_explicit(output, sizeof(output));
- return ret;
+out_zero_chacha:
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ return ret ? ret : -EFAULT;
}
/*
@@ -1016,7 +1025,7 @@ int __init rand_initialize(void)
*/
void add_device_randomness(const void *buf, size_t size)
{
- cycles_t cycles = random_get_entropy();
+ unsigned long cycles = random_get_entropy();
unsigned long flags, now = jiffies;
if (crng_init == 0 && size)
@@ -1047,8 +1056,7 @@ struct timer_rand_state {
*/
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
- cycles_t cycles = random_get_entropy();
- unsigned long flags, now = jiffies;
+ unsigned long cycles = random_get_entropy(), now = jiffies, flags;
long delta, delta2, delta3;
spin_lock_irqsave(&input_pool.lock, flags);
@@ -1141,12 +1149,9 @@ void add_hwgenerator_randomness(const void *buffer, size_t count,
size_t entropy)
{
if (unlikely(crng_init == 0 && entropy < POOL_MIN_BITS)) {
- size_t ret = crng_pre_init_inject(buffer, count, true);
- mix_pool_bytes(buffer, ret);
- count -= ret;
- buffer += ret;
- if (!count || crng_init == 0)
- return;
+ crng_pre_init_inject(buffer, count, true);
+ mix_pool_bytes(buffer, count);
+ return;
}
/*
@@ -1340,8 +1345,7 @@ static void mix_interrupt_randomness(struct work_struct *work)
void add_interrupt_randomness(int irq)
{
enum { MIX_INFLIGHT = 1U << 31 };
- cycles_t cycles = random_get_entropy();
- unsigned long now = jiffies;
+ unsigned long cycles = random_get_entropy(), now = jiffies;
struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
struct pt_regs *regs = get_irq_regs();
unsigned int new_count;
@@ -1354,16 +1358,12 @@ void add_interrupt_randomness(int irq)
if (cycles == 0)
cycles = get_reg(fast_pool, regs);
- if (sizeof(cycles) == 8)
+ if (sizeof(unsigned long) == 8) {
irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq;
- else {
+ irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
+ } else {
irq_data.u32[0] = cycles ^ irq;
irq_data.u32[1] = now;
- }
-
- if (sizeof(unsigned long) == 8)
- irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
- else {
irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_;
irq_data.u32[3] = get_reg(fast_pool, regs);
}
@@ -1410,7 +1410,7 @@ static void entropy_timer(struct timer_list *t)
static void try_to_generate_entropy(void)
{
struct {
- cycles_t cycles;
+ unsigned long cycles;
struct timer_list timer;
} stack;
@@ -1545,6 +1545,13 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
{
static int maxwarn = 10;
+ /*
+ * Opportunistically attempt to initialize the RNG on platforms that
+ * have fast cycle counters, but don't (for now) require it to succeed.
+ */
+ if (!crng_ready())
+ try_to_generate_entropy();
+
if (!crng_ready() && maxwarn > 0) {
maxwarn--;
if (__ratelimit(&urandom_warning))
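
The new crng_fast_key_erasure() comment describes the heart of the scheme: every request expands the current key into a block whose first half immediately becomes the next key, so a later compromise of the state cannot reveal past output. A self-contained userspace model of that idea; block_prf() is a toy stand-in, not the kernel's ChaCha:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KEY_SZ 32
#define BLK_SZ 64

/* Toy pseudo-random function standing in for the ChaCha block function. */
static void block_prf(const uint8_t key[KEY_SZ], uint8_t out[BLK_SZ])
{
	for (int i = 0; i < BLK_SZ; i++)
		out[i] = (uint8_t)(key[i % KEY_SZ] * 167u + i * 31u + 1u);
}

static void fast_key_erasure(uint8_t key[KEY_SZ], uint8_t *random_data,
			     size_t len)
{
	uint8_t block[BLK_SZ];

	block_prf(key, block);
	memcpy(key, block, KEY_SZ);	/* the old key is gone immediately */
	if (len > KEY_SZ)
		len = KEY_SZ;		/* at most 32 bytes per call */
	memcpy(random_data, block + KEY_SZ, len);
	memset(block, 0, sizeof(block));	/* erase the local copy */
}

int main(void)
{
	uint8_t key[KEY_SZ] = { 1 }, out[32];

	fast_key_erasure(key, out, sizeof(out));
	printf("first output byte: %02x\n", out[0]);
	return 0;
}
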
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 23cc8297ec4c..d429ba52a719 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -117,6 +117,10 @@ static void clk_generated_best_diff(struct clk_rate_request *req,
tmp_rate = parent_rate;
else
tmp_rate = parent_rate / div;
+
+ if (tmp_rate < req->min_rate || tmp_rate > req->max_rate)
+ return;
+
tmp_diff = abs(req->rate - tmp_rate);
if (*best_diff < 0 || *best_diff >= tmp_diff) {
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 3ad20e75fd23..48a1eb9f2d55 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -941,6 +941,7 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw,
u64 temp = (u64)parent_rate << CM_DIV_FRAC_BITS;
u32 div, mindiv, maxdiv;
+ do_div(temp, rate);
div = temp;
div &= ~unused_frac_mask;
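
The one-line fix restores the division that turns the shifted parent rate into a fixed-point divider; the kernel's do_div() divides its 64-bit dividend in place and returns the remainder. A runnable userspace stand-in showing the resulting 20.12 fixed-point arithmetic (rates are illustrative):

#include <stdint.h>
#include <stdio.h>

#define CM_DIV_FRAC_BITS 12

/* Stand-in with do_div() semantics: divide in place, return remainder. */
static uint32_t do_div_sketch(uint64_t *dividend, uint32_t divisor)
{
	uint32_t rem = (uint32_t)(*dividend % divisor);

	*dividend /= divisor;
	return rem;
}

int main(void)
{
	uint64_t temp = (uint64_t)19200000 << CM_DIV_FRAC_BITS;

	do_div_sketch(&temp, 48000);	/* temp is now a 20.12 divider */
	printf("div = %llu (fixed-point), %llu (integer part)\n",
	       (unsigned long long)temp,
	       (unsigned long long)(temp >> CM_DIV_FRAC_BITS));
	return 0;
}
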
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index aa1561b773d6..070c3b896559 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -11,20 +11,48 @@
#include <dt-bindings/clock/microchip,mpfs-clock.h>
/* address offset of control registers */
+#define REG_MSSPLL_REF_CR 0x08u
+#define REG_MSSPLL_POSTDIV_CR 0x10u
+#define REG_MSSPLL_SSCG_2_CR 0x2Cu
#define REG_CLOCK_CONFIG_CR 0x08u
+#define REG_RTC_CLOCK_CR 0x0Cu
#define REG_SUBBLK_CLOCK_CR 0x84u
#define REG_SUBBLK_RESET_CR 0x88u
+#define MSSPLL_FBDIV_SHIFT 0x00u
+#define MSSPLL_FBDIV_WIDTH 0x0Cu
+#define MSSPLL_REFDIV_SHIFT 0x08u
+#define MSSPLL_REFDIV_WIDTH 0x06u
+#define MSSPLL_POSTDIV_SHIFT 0x08u
+#define MSSPLL_POSTDIV_WIDTH 0x07u
+#define MSSPLL_FIXED_DIV 4u
+
struct mpfs_clock_data {
void __iomem *base;
+ void __iomem *msspll_base;
struct clk_hw_onecell_data hw_data;
};
+struct mpfs_msspll_hw_clock {
+ void __iomem *base;
+ unsigned int id;
+ u32 reg_offset;
+ u32 shift;
+ u32 width;
+ u32 flags;
+ struct clk_hw hw;
+ struct clk_init_data init;
+};
+
+#define to_mpfs_msspll_clk(_hw) container_of(_hw, struct mpfs_msspll_hw_clock, hw)
+
struct mpfs_cfg_clock {
const struct clk_div_table *table;
unsigned int id;
+ u32 reg_offset;
u8 shift;
u8 width;
+ u8 flags;
};
struct mpfs_cfg_hw_clock {
@@ -55,7 +83,7 @@ struct mpfs_periph_hw_clock {
*/
static DEFINE_SPINLOCK(mpfs_clk_lock);
-static const struct clk_parent_data mpfs_cfg_parent[] = {
+static const struct clk_parent_data mpfs_ext_ref[] = {
{ .index = 0 },
};
@@ -69,6 +97,86 @@ static const struct clk_div_table mpfs_div_ahb_table[] = {
{ 0, 0 }
};
+/*
+ * The only two supported reference clock frequencies for the PolarFire SoC are
+ * 100 and 125 MHz, as the rtc reference is required to be 1 MHz.
+ * It therefore only needs to have divider table entries corresponding to
+ * divide by 100 and 125.
+ */
+static const struct clk_div_table mpfs_div_rtcref_table[] = {
+ { 100, 100 }, { 125, 125 },
+ { 0, 0 }
+};
+
+static unsigned long mpfs_clk_msspll_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct mpfs_msspll_hw_clock *msspll_hw = to_mpfs_msspll_clk(hw);
+ void __iomem *mult_addr = msspll_hw->base + msspll_hw->reg_offset;
+ void __iomem *ref_div_addr = msspll_hw->base + REG_MSSPLL_REF_CR;
+ void __iomem *postdiv_addr = msspll_hw->base + REG_MSSPLL_POSTDIV_CR;
+ u32 mult, ref_div, postdiv;
+
+ mult = readl_relaxed(mult_addr) >> MSSPLL_FBDIV_SHIFT;
+ mult &= clk_div_mask(MSSPLL_FBDIV_WIDTH);
+ ref_div = readl_relaxed(ref_div_addr) >> MSSPLL_REFDIV_SHIFT;
+ ref_div &= clk_div_mask(MSSPLL_REFDIV_WIDTH);
+ postdiv = readl_relaxed(postdiv_addr) >> MSSPLL_POSTDIV_SHIFT;
+ postdiv &= clk_div_mask(MSSPLL_POSTDIV_WIDTH);
+
+ return prate * mult / (ref_div * MSSPLL_FIXED_DIV * postdiv);
+}
+
+static const struct clk_ops mpfs_clk_msspll_ops = {
+ .recalc_rate = mpfs_clk_msspll_recalc_rate,
+};
+
+#define CLK_PLL(_id, _name, _parent, _shift, _width, _flags, _offset) { \
+ .id = _id, \
+ .shift = _shift, \
+ .width = _width, \
+ .reg_offset = _offset, \
+ .flags = _flags, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parent, &mpfs_clk_msspll_ops, 0), \
+}
+
+static struct mpfs_msspll_hw_clock mpfs_msspll_clks[] = {
+ CLK_PLL(CLK_MSSPLL, "clk_msspll", mpfs_ext_ref, MSSPLL_FBDIV_SHIFT,
+ MSSPLL_FBDIV_WIDTH, 0, REG_MSSPLL_SSCG_2_CR),
+};
+
+static int mpfs_clk_register_msspll(struct device *dev, struct mpfs_msspll_hw_clock *msspll_hw,
+ void __iomem *base)
+{
+ msspll_hw->base = base;
+
+ return devm_clk_hw_register(dev, &msspll_hw->hw);
+}
+
+static int mpfs_clk_register_mssplls(struct device *dev, struct mpfs_msspll_hw_clock *msspll_hws,
+ unsigned int num_clks, struct mpfs_clock_data *data)
+{
+ void __iomem *base = data->msspll_base;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_clks; i++) {
+ struct mpfs_msspll_hw_clock *msspll_hw = &msspll_hws[i];
+
+ ret = mpfs_clk_register_msspll(dev, msspll_hw, base);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register msspll id: %d\n",
+ CLK_MSSPLL);
+
+ data->hw_data.hws[msspll_hw->id] = &msspll_hw->hw;
+ }
+
+ return 0;
+}
+
+/*
+ * "CFG" clocks
+ */
+
static unsigned long mpfs_cfg_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
@@ -76,10 +184,10 @@ static unsigned long mpfs_cfg_clk_recalc_rate(struct clk_hw *hw, unsigned long p
void __iomem *base_addr = cfg_hw->sys_base;
u32 val;
- val = readl_relaxed(base_addr + REG_CLOCK_CONFIG_CR) >> cfg->shift;
+ val = readl_relaxed(base_addr + cfg->reg_offset) >> cfg->shift;
val &= clk_div_mask(cfg->width);
- return prate / (1u << val);
+ return divider_recalc_rate(hw, prate, val, cfg->table, cfg->flags, cfg->width);
}
static long mpfs_cfg_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
@@ -105,11 +213,10 @@ static int mpfs_cfg_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned
return divider_setting;
spin_lock_irqsave(&mpfs_clk_lock, flags);
-
- val = readl_relaxed(base_addr + REG_CLOCK_CONFIG_CR);
+ val = readl_relaxed(base_addr + cfg->reg_offset);
val &= ~(clk_div_mask(cfg->width) << cfg_hw->cfg.shift);
val |= divider_setting << cfg->shift;
- writel_relaxed(val, base_addr + REG_CLOCK_CONFIG_CR);
+ writel_relaxed(val, base_addr + cfg->reg_offset);
spin_unlock_irqrestore(&mpfs_clk_lock, flags);
@@ -122,19 +229,33 @@ static const struct clk_ops mpfs_clk_cfg_ops = {
.set_rate = mpfs_cfg_clk_set_rate,
};
-#define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags) { \
- .cfg.id = _id, \
- .cfg.shift = _shift, \
- .cfg.width = _width, \
- .cfg.table = _table, \
- .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parent, &mpfs_clk_cfg_ops, \
- _flags), \
+#define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags, _offset) { \
+ .cfg.id = _id, \
+ .cfg.shift = _shift, \
+ .cfg.width = _width, \
+ .cfg.table = _table, \
+ .cfg.reg_offset = _offset, \
+ .cfg.flags = _flags, \
+ .hw.init = CLK_HW_INIT(_name, _parent, &mpfs_clk_cfg_ops, 0), \
}
static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = {
- CLK_CFG(CLK_CPU, "clk_cpu", mpfs_cfg_parent, 0, 2, mpfs_div_cpu_axi_table, 0),
- CLK_CFG(CLK_AXI, "clk_axi", mpfs_cfg_parent, 2, 2, mpfs_div_cpu_axi_table, 0),
- CLK_CFG(CLK_AHB, "clk_ahb", mpfs_cfg_parent, 4, 2, mpfs_div_ahb_table, 0),
+ CLK_CFG(CLK_CPU, "clk_cpu", "clk_msspll", 0, 2, mpfs_div_cpu_axi_table, 0,
+ REG_CLOCK_CONFIG_CR),
+ CLK_CFG(CLK_AXI, "clk_axi", "clk_msspll", 2, 2, mpfs_div_cpu_axi_table, 0,
+ REG_CLOCK_CONFIG_CR),
+ CLK_CFG(CLK_AHB, "clk_ahb", "clk_msspll", 4, 2, mpfs_div_ahb_table, 0,
+ REG_CLOCK_CONFIG_CR),
+ {
+ .cfg.id = CLK_RTCREF,
+ .cfg.shift = 0,
+ .cfg.width = 12,
+ .cfg.table = mpfs_div_rtcref_table,
+ .cfg.reg_offset = REG_RTC_CLOCK_CR,
+ .cfg.flags = CLK_DIVIDER_ONE_BASED,
+ .hw.init =
+ CLK_HW_INIT_PARENTS_DATA("clk_rtcref", mpfs_ext_ref, &mpfs_clk_cfg_ops, 0),
+ }
};
static int mpfs_clk_register_cfg(struct device *dev, struct mpfs_cfg_hw_clock *cfg_hw,
@@ -160,13 +281,17 @@ static int mpfs_clk_register_cfgs(struct device *dev, struct mpfs_cfg_hw_clock *
return dev_err_probe(dev, ret, "failed to register clock id: %d\n",
cfg_hw->cfg.id);
- id = cfg_hws[i].cfg.id;
+ id = cfg_hw->cfg.id;
data->hw_data.hws[id] = &cfg_hw->hw;
}
return 0;
}
+/*
+ * peripheral clocks - devices connected to axi or ahb buses.
+ */
+
static int mpfs_periph_clk_enable(struct clk_hw *hw)
{
struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
@@ -200,10 +325,6 @@ static void mpfs_periph_clk_disable(struct clk_hw *hw)
spin_lock_irqsave(&mpfs_clk_lock, flags);
- reg = readl_relaxed(base_addr + REG_SUBBLK_RESET_CR);
- val = reg | (1u << periph->shift);
- writel_relaxed(val, base_addr + REG_SUBBLK_RESET_CR);
-
reg = readl_relaxed(base_addr + REG_SUBBLK_CLOCK_CR);
val = reg & ~(1u << periph->shift);
writel_relaxed(val, base_addr + REG_SUBBLK_CLOCK_CR);
@@ -249,8 +370,10 @@ static const struct clk_ops mpfs_periph_clk_ops = {
* trap handler
* - CLK_MMUART0: reserved by the hss
* - CLK_DDRC: provides clock to the ddr subsystem
- * - CLK_FICx: these provide clocks for sections of the fpga fabric, disabling them would
- * cause the fabric to go into reset
+ * - CLK_FICx: these supply the processor-side clocks to the "FIC" (Fabric InterConnect)
+ * clock domain crossers, which provide the interface to the FPGA fabric. Disabling them
+ * causes the FPGA fabric to go into reset.
+ * - CLK_ATHENA: The athena clock is FIC4, which is reserved for the Athena TeraFire.
*/
static struct mpfs_periph_hw_clock mpfs_periph_clks[] = {
@@ -258,7 +381,7 @@ static struct mpfs_periph_hw_clock mpfs_periph_clks[] = {
CLK_PERIPH(CLK_MAC0, "clk_periph_mac0", PARENT_CLK(AHB), 1, 0),
CLK_PERIPH(CLK_MAC1, "clk_periph_mac1", PARENT_CLK(AHB), 2, 0),
CLK_PERIPH(CLK_MMC, "clk_periph_mmc", PARENT_CLK(AHB), 3, 0),
- CLK_PERIPH(CLK_TIMER, "clk_periph_timer", PARENT_CLK(AHB), 4, 0),
+ CLK_PERIPH(CLK_TIMER, "clk_periph_timer", PARENT_CLK(RTCREF), 4, 0),
CLK_PERIPH(CLK_MMUART0, "clk_periph_mmuart0", PARENT_CLK(AHB), 5, CLK_IS_CRITICAL),
CLK_PERIPH(CLK_MMUART1, "clk_periph_mmuart1", PARENT_CLK(AHB), 6, 0),
CLK_PERIPH(CLK_MMUART2, "clk_periph_mmuart2", PARENT_CLK(AHB), 7, 0),
@@ -277,11 +400,11 @@ static struct mpfs_periph_hw_clock mpfs_periph_clks[] = {
CLK_PERIPH(CLK_GPIO1, "clk_periph_gpio1", PARENT_CLK(AHB), 21, 0),
CLK_PERIPH(CLK_GPIO2, "clk_periph_gpio2", PARENT_CLK(AHB), 22, 0),
CLK_PERIPH(CLK_DDRC, "clk_periph_ddrc", PARENT_CLK(AHB), 23, CLK_IS_CRITICAL),
- CLK_PERIPH(CLK_FIC0, "clk_periph_fic0", PARENT_CLK(AHB), 24, CLK_IS_CRITICAL),
- CLK_PERIPH(CLK_FIC1, "clk_periph_fic1", PARENT_CLK(AHB), 25, CLK_IS_CRITICAL),
- CLK_PERIPH(CLK_FIC2, "clk_periph_fic2", PARENT_CLK(AHB), 26, CLK_IS_CRITICAL),
- CLK_PERIPH(CLK_FIC3, "clk_periph_fic3", PARENT_CLK(AHB), 27, CLK_IS_CRITICAL),
- CLK_PERIPH(CLK_ATHENA, "clk_periph_athena", PARENT_CLK(AHB), 28, 0),
+ CLK_PERIPH(CLK_FIC0, "clk_periph_fic0", PARENT_CLK(AXI), 24, CLK_IS_CRITICAL),
+ CLK_PERIPH(CLK_FIC1, "clk_periph_fic1", PARENT_CLK(AXI), 25, CLK_IS_CRITICAL),
+ CLK_PERIPH(CLK_FIC2, "clk_periph_fic2", PARENT_CLK(AXI), 26, CLK_IS_CRITICAL),
+ CLK_PERIPH(CLK_FIC3, "clk_periph_fic3", PARENT_CLK(AXI), 27, CLK_IS_CRITICAL),
+ CLK_PERIPH(CLK_ATHENA, "clk_periph_athena", PARENT_CLK(AXI), 28, CLK_IS_CRITICAL),
CLK_PERIPH(CLK_CFM, "clk_periph_cfm", PARENT_CLK(AHB), 29, 0),
};
@@ -322,8 +445,9 @@ static int mpfs_clk_probe(struct platform_device *pdev)
unsigned int num_clks;
int ret;
- /* CLK_RESERVED is not part of cfg_clks nor periph_clks, so add 1 */
- num_clks = ARRAY_SIZE(mpfs_cfg_clks) + ARRAY_SIZE(mpfs_periph_clks) + 1;
+ /* CLK_RESERVED is not part of clock arrays, so add 1 */
+ num_clks = ARRAY_SIZE(mpfs_msspll_clks) + ARRAY_SIZE(mpfs_cfg_clks)
+ + ARRAY_SIZE(mpfs_periph_clks) + 1;
clk_data = devm_kzalloc(dev, struct_size(clk_data, hw_data.hws, num_clks), GFP_KERNEL);
if (!clk_data)
@@ -333,8 +457,17 @@ static int mpfs_clk_probe(struct platform_device *pdev)
if (IS_ERR(clk_data->base))
return PTR_ERR(clk_data->base);
+ clk_data->msspll_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(clk_data->msspll_base))
+ return PTR_ERR(clk_data->msspll_base);
+
clk_data->hw_data.num = num_clks;
+ ret = mpfs_clk_register_mssplls(dev, mpfs_msspll_clks, ARRAY_SIZE(mpfs_msspll_clks),
+ clk_data);
+ if (ret)
+ return ret;
+
ret = mpfs_clk_register_cfgs(dev, mpfs_cfg_clks, ARRAY_SIZE(mpfs_cfg_clks), clk_data);
if (ret)
return ret;
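
The new recalc callback computes rate = prate * FBDIV / (REFDIV * 4 * POSTDIV). A quick worked example with plausible, purely illustrative register values (not read from real silicon):

#include <stdio.h>

int main(void)
{
	unsigned long long prate = 100000000ULL;	/* 100 MHz reference */
	unsigned long long mult = 48, ref_div = 2, postdiv = 1, fixed_div = 4;

	/* 100 MHz * 48 / (2 * 4 * 1) = 600 MHz */
	printf("msspll = %llu Hz\n",
	       prate * mult / (ref_div * fixed_div * postdiv));
	return 0;
}
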
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index f675fd969c4d..e9c357309fd9 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -818,7 +818,7 @@ EXPORT_SYMBOL_GPL(clk_pixel_ops);
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct clk_rate_request parent_req = { };
+ struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX };
struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
struct clk_hw *xo, *p0, *p1, *p2;
unsigned long p0_rate;
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
index 8a10bade7e0d..d65398497d5f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
@@ -241,6 +241,7 @@ static struct clk_init_data rtc_32k_init_data = {
.ops = &ccu_mux_ops,
.parent_hws = rtc_32k_parents,
.num_parents = ARRAY_SIZE(rtc_32k_parents), /* updated during probe */
+ .flags = CLK_IS_CRITICAL,
};
static struct ccu_mux rtc_32k_clk = {
@@ -297,10 +298,6 @@ static const struct sunxi_ccu_desc sun6i_rtc_ccu_desc = {
.hw_clks = &sun6i_rtc_ccu_hw_clks,
};
-static const struct clk_parent_data sun50i_h6_osc32k_fanout_parents[] = {
- { .hw = &osc32k_clk.common.hw },
-};
-
static const struct clk_parent_data sun50i_h616_osc32k_fanout_parents[] = {
{ .hw = &osc32k_clk.common.hw },
{ .fw_name = "pll-32k" },
@@ -313,13 +310,6 @@ static const struct clk_parent_data sun50i_r329_osc32k_fanout_parents[] = {
{ .hw = &osc24M_32k_clk.common.hw }
};
-static const struct sun6i_rtc_match_data sun50i_h6_rtc_ccu_data = {
- .have_ext_osc32k = true,
- .have_iosc_calibration = true,
- .osc32k_fanout_parents = sun50i_h6_osc32k_fanout_parents,
- .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_h6_osc32k_fanout_parents),
-};
-
static const struct sun6i_rtc_match_data sun50i_h616_rtc_ccu_data = {
.have_iosc_calibration = true,
.rtc_32k_single_parent = true,
@@ -335,10 +325,6 @@ static const struct sun6i_rtc_match_data sun50i_r329_rtc_ccu_data = {
static const struct of_device_id sun6i_rtc_ccu_match[] = {
{
- .compatible = "allwinner,sun50i-h6-rtc",
- .data = &sun50i_h6_rtc_ccu_data,
- },
- {
.compatible = "allwinner,sun50i-h616-rtc",
.data = &sun50i_h616_rtc_ccu_data,
},
@@ -346,6 +332,7 @@ static const struct of_device_id sun6i_rtc_ccu_match[] = {
.compatible = "allwinner,sun50i-r329-rtc",
.data = &sun50i_r329_rtc_ccu_data,
},
+ {},
};
int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg)
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index 542b31d6e96d..636bcf2439ef 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -109,6 +109,8 @@ static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
spin_lock_init(&data->lock);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -EINVAL;
/* one clock/reset pair per word */
count = DIV_ROUND_UP((resource_size(r)), SUN9I_MMC_WIDTH);
data->membase = devm_ioremap_resource(&pdev->dev, r);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index f9d593ff4718..0253731d6d25 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -24,13 +24,17 @@
#define CLK_HW_DIV 2
#define LUT_TURBO_IND 1
+#define GT_IRQ_STATUS BIT(2)
+
#define HZ_PER_KHZ 1000
struct qcom_cpufreq_soc_data {
u32 reg_enable;
+ u32 reg_domain_state;
u32 reg_dcvs_ctrl;
u32 reg_freq_lut;
u32 reg_volt_lut;
+ u32 reg_intr_clr;
u32 reg_current_vote;
u32 reg_perf_state;
u8 lut_row_size;
@@ -280,37 +284,46 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
}
}
-static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
+static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{
- unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote);
+ unsigned int lval;
+
+ if (data->soc_data->reg_current_vote)
+ lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
+ else
+ lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
- return (val & 0x3FF) * 19200;
+ return lval * xo_rate;
}
static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
struct cpufreq_policy *policy = data->policy;
- int cpu = cpumask_first(policy->cpus);
+ int cpu = cpumask_first(policy->related_cpus);
struct device *dev = get_cpu_device(cpu);
unsigned long freq_hz, throttled_freq;
struct dev_pm_opp *opp;
- unsigned int freq;
/*
* Get the h/w throttled frequency, normalize it using the
* registered opp table and use it to calculate thermal pressure.
*/
- freq = qcom_lmh_get_throttle_freq(data);
- freq_hz = freq * HZ_PER_KHZ;
+ freq_hz = qcom_lmh_get_throttle_freq(data);
opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
- dev_pm_opp_find_freq_ceil(dev, &freq_hz);
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);
+
- throttled_freq = freq_hz / HZ_PER_KHZ;
-
- /* Update thermal pressure (the boost frequencies are accepted) */
- arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+ if (IS_ERR(opp)) {
+ dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
+ } else {
+ throttled_freq = freq_hz / HZ_PER_KHZ;
+
+ /* Update thermal pressure (the boost frequencies are accepted) */
+ arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+
+ dev_pm_opp_put(opp);
+ }
/*
* In the unlikely case policy is unregistered do not enable
@@ -350,6 +363,10 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
disable_irq_nosync(c_data->throttle_irq);
schedule_delayed_work(&c_data->throttle_work, 0);
+ if (c_data->soc_data->reg_intr_clr)
+ writel_relaxed(GT_IRQ_STATUS,
+ c_data->base + c_data->soc_data->reg_intr_clr);
+
return IRQ_HANDLED;
}
@@ -365,9 +382,11 @@ static const struct qcom_cpufreq_soc_data qcom_soc_data = {
static const struct qcom_cpufreq_soc_data epss_soc_data = {
.reg_enable = 0x0,
+ .reg_domain_state = 0x20,
.reg_dcvs_ctrl = 0xb0,
.reg_freq_lut = 0x100,
.reg_volt_lut = 0x200,
+ .reg_intr_clr = 0x308,
.reg_perf_state = 0x320,
.lut_row_size = 4,
};
@@ -417,16 +436,39 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
return 0;
}
-static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
+static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
+{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ int ret;
+
+ ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
+ data->irq_name, data->throttle_irq);
+
+ return ret;
+}
+
+static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+
if (data->throttle_irq <= 0)
- return;
+ return 0;
mutex_lock(&data->throttle_lock);
data->cancel_throttle = true;
mutex_unlock(&data->throttle_lock);
cancel_delayed_work_sync(&data->throttle_work);
+ irq_set_affinity_hint(data->throttle_irq, NULL);
+
+ return 0;
+}
+
+static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
+{
free_irq(data->throttle_irq, data);
}
@@ -583,6 +625,8 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.get = qcom_cpufreq_hw_get,
.init = qcom_cpufreq_hw_cpu_init,
.exit = qcom_cpufreq_hw_cpu_exit,
+ .online = qcom_cpufreq_hw_cpu_online,
+ .offline = qcom_cpufreq_hw_cpu_offline,
.register_em = cpufreq_register_em_with_opp,
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
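
The new online/offline callbacks keep the throttle interrupt consistent across CPU hotplug: offline cancels the polling work and parks the IRQ affinity, online re-pins it to the policy's CPUs. A condensed sketch of that pairing, with hypothetical driver data:

static int my_cpu_online(struct cpufreq_policy *policy)
{
	struct my_data *data = policy->driver_data;

	return irq_set_affinity_hint(data->throttle_irq, policy->cpus);
}

static int my_cpu_offline(struct cpufreq_policy *policy)
{
	struct my_data *data = policy->driver_data;

	if (data->throttle_irq <= 0)
		return 0;

	cancel_delayed_work_sync(&data->throttle_work);
	irq_set_affinity_hint(data->throttle_irq, NULL);
	return 0;
}
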
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 2deed8d8773f..75e1bf3a08f7 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -98,8 +98,10 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
return -ENOMEM;
ret = sun50i_cpufreq_get_efuse(&speed);
- if (ret)
+ if (ret) {
+ kfree(opp_tables);
return ret;
+ }
snprintf(name, MAX_NAME_LEN, "speed%d", speed);
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index b459eda2cd37..5c852e671992 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -22,6 +22,7 @@
#include <linux/pm_runtime.h>
#include <asm/cpuidle.h>
#include <asm/sbi.h>
+#include <asm/smp.h>
#include <asm/suspend.h>
#include "dt_idle_states.h"
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 11f30fd48c14..031b5f701a0a 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -65,6 +65,7 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
} else {
/* copy only remaining bytes */
memcpy(data, &val, max - currsize);
+ break;
}
} while (currsize < max);
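
Without the added break, a request whose length is not a multiple of the word size never advances currsize on the final partial copy, so the do/while spins forever. A userspace model of the fixed loop:

#include <stdio.h>
#include <string.h>

static unsigned int read_words(unsigned char *data, unsigned int max)
{
	unsigned int currsize = 0;
	unsigned int val = 0xdeadbeef;	/* stand-in for a FIFO word */

	do {
		if (max - currsize >= sizeof(val)) {
			memcpy(data + currsize, &val, sizeof(val));
			currsize += sizeof(val);
		} else {
			/* partial tail: currsize no longer advances */
			memcpy(data + currsize, &val, max - currsize);
			break;	/* the fix: stop instead of looping forever */
		}
	} while (currsize < max);

	return max;
}

int main(void)
{
	unsigned char buf[7];

	read_words(buf, sizeof(buf));	/* 7 is not a multiple of 4 */
	printf("done\n");
	return 0;
}
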
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 8a7267d116b7..3f2182d66829 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -436,7 +436,6 @@ static int wait_for_media_ready(struct cxl_dev_state *cxlds)
for (i = mbox_ready_timeout; i; i--) {
u32 temp;
- int rc;
rc = pci_read_config_dword(
pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 511805dbeb75..4c9eb53ba3f8 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -12,6 +12,7 @@ dmabuf_selftests-y := \
selftest.o \
st-dma-fence.o \
st-dma-fence-chain.o \
+ st-dma-fence-unwrap.o \
st-dma-resv.o
obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index df23239b04fc..53297a0d9c57 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -407,6 +407,7 @@ static inline int is_dma_buf_file(struct file *file)
static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
+ static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
struct file *file;
struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
@@ -416,6 +417,13 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
inode->i_size = dmabuf->size;
inode_set_bytes(inode, dmabuf->size);
+ /*
+ * The ->i_ino acquired from get_next_ino() is not unique, and is
+ * therefore not suitable for use as a dentry name by dmabuf stats.
+ * Override ->i_ino with a unique, dmabuf-fs specific value.
+ */
+ inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
flags, &dma_buf_fops);
if (IS_ERR(file))
@@ -543,10 +551,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file->f_mode |= FMODE_LSEEK;
dmabuf->file = file;
- ret = dma_buf_stats_setup(dmabuf);
- if (ret)
- goto err_sysfs;
-
mutex_init(&dmabuf->lock);
INIT_LIST_HEAD(&dmabuf->attachments);
@@ -554,6 +558,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
list_add(&dmabuf->list_node, &db_list.head);
mutex_unlock(&db_list.lock);
+ ret = dma_buf_stats_setup(dmabuf);
+ if (ret)
+ goto err_sysfs;
+
return dmabuf;
err_sysfs:
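
Two independent fixes here: the inode number now comes from a private 64-bit counter because get_next_ino() does not guarantee uniqueness, and the stats setup moves after the buffer is fully initialized and on the global list so the shared error path can tear it down safely. The numbering scheme in isolation:

#include <linux/atomic.h>

static u64 next_dmabuf_ino(void)
{
	static atomic64_t counter = ATOMIC64_INIT(0);

	/* 64-bit and monotonic, so values never repeat in practice. */
	return atomic64_add_return(1, &counter);
}
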
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index cb1bacb5a42b..5c8a7084577b 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -159,6 +159,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence_array *array;
size_t size = sizeof(*array);
+ WARN_ON(!num_fences || !fences);
+
/* Allocate the callback structures behind the array. */
size += num_fences * sizeof(struct dma_fence_array_cb);
array = kzalloc(size, GFP_KERNEL);
@@ -219,3 +221,33 @@ bool dma_fence_match_context(struct dma_fence *fence, u64 context)
return true;
}
EXPORT_SYMBOL(dma_fence_match_context);
+
+struct dma_fence *dma_fence_array_first(struct dma_fence *head)
+{
+ struct dma_fence_array *array;
+
+ if (!head)
+ return NULL;
+
+ array = to_dma_fence_array(head);
+ if (!array)
+ return head;
+
+ if (!array->num_fences)
+ return NULL;
+
+ return array->fences[0];
+}
+EXPORT_SYMBOL(dma_fence_array_first);
+
+struct dma_fence *dma_fence_array_next(struct dma_fence *head,
+ unsigned int index)
+{
+ struct dma_fence_array *array = to_dma_fence_array(head);
+
+ if (!array || index >= array->num_fences)
+ return NULL;
+
+ return array->fences[index];
+}
+EXPORT_SYMBOL(dma_fence_array_next);
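
A hedged usage sketch of the two helpers just added, walking every fence behind a head that may or may not be an array; this is the primitive the dma-fence-unwrap iterator builds on:

static void walk_fences(struct dma_fence *head)
{
	struct dma_fence *f = dma_fence_array_first(head);
	unsigned int i = 0;

	/* For a plain fence, the first call yields head and the loop
	 * ends after one pass; for an array it visits every element.
	 */
	while (f) {
		pr_info("fence context %llu seqno %llu\n",
			(unsigned long long)f->context,
			(unsigned long long)f->seqno);
		f = dma_fence_array_next(head, ++i);
	}
}
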
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
index 97d73aaa31da..851965867d9c 100644
--- a/drivers/dma-buf/selftests.h
+++ b/drivers/dma-buf/selftests.h
@@ -12,4 +12,5 @@
selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
selftest(dma_fence, dma_fence)
selftest(dma_fence_chain, dma_fence_chain)
+selftest(dma_fence_unwrap, dma_fence_unwrap)
selftest(dma_resv, dma_resv)
diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c
new file mode 100644
index 000000000000..039f016b57be
--- /dev/null
+++ b/drivers/dma-buf/st-dma-fence-unwrap.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/dma-fence-unwrap.h>
+#if 0
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#endif
+
+#include "selftest.h"
+
+#define CHAIN_SZ (4 << 10)
+
+static inline struct mock_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+} *to_mock_fence(struct dma_fence *f) {
+ return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+ return "mock";
+}
+
+static const struct dma_fence_ops mock_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+ struct mock_fence *f;
+
+ f = kmalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ spin_lock_init(&f->lock);
+ dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+ return &f->base;
+}
+
+static struct dma_fence *mock_array(unsigned int num_fences, ...)
+{
+ struct dma_fence_array *array;
+ struct dma_fence **fences;
+ va_list valist;
+ int i;
+
+ fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ return NULL;
+
+ va_start(valist, num_fences);
+ for (i = 0; i < num_fences; ++i)
+ fences[i] = va_arg(valist, typeof(*fences));
+ va_end(valist);
+
+ array = dma_fence_array_create(num_fences, fences,
+ dma_fence_context_alloc(1),
+ 1, false);
+ if (!array)
+ goto cleanup;
+ return &array->base;
+
+cleanup:
+ for (i = 0; i < num_fences; ++i)
+ dma_fence_put(fences[i]);
+ kfree(fences);
+ return NULL;
+}
+
+static struct dma_fence *mock_chain(struct dma_fence *prev,
+ struct dma_fence *fence)
+{
+ struct dma_fence_chain *f;
+
+ f = dma_fence_chain_alloc();
+ if (!f) {
+ dma_fence_put(prev);
+ dma_fence_put(fence);
+ return NULL;
+ }
+
+ dma_fence_chain_init(f, prev, fence, 1);
+ return &f->base;
+}
+
+static int sanitycheck(void *arg)
+{
+ struct dma_fence *f, *chain, *array;
+ int err = 0;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ array = mock_array(1, f);
+ if (!array)
+ return -ENOMEM;
+
+ chain = mock_chain(NULL, array);
+ if (!chain)
+ return -ENOMEM;
+
+ dma_fence_signal(f);
+ dma_fence_put(chain);
+ return err;
+}
+
+static int unwrap_array(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *array;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ f2 = mock_fence();
+ if (!f2) {
+ dma_fence_put(f1);
+ return -ENOMEM;
+ }
+
+ array = mock_array(2, f1, f2);
+ if (!array)
+ return -ENOMEM;
+
+ dma_fence_unwrap_for_each(fence, &iter, array) {
+ if (fence == f1) {
+ f1 = NULL;
+ } else if (fence == f2) {
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_signal(f1);
+ dma_fence_signal(f2);
+ dma_fence_put(array);
+ return err;
+}
+
+static int unwrap_chain(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *chain;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ f2 = mock_fence();
+ if (!f2) {
+ dma_fence_put(f1);
+ return -ENOMEM;
+ }
+
+ chain = mock_chain(f1, f2);
+ if (!chain)
+ return -ENOMEM;
+
+ dma_fence_unwrap_for_each(fence, &iter, chain) {
+ if (fence == f1) {
+ f1 = NULL;
+ } else if (fence == f2) {
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_signal(f1);
+ dma_fence_signal(f2);
+ dma_fence_put(chain);
+ return err;
+}
+
+static int unwrap_chain_array(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *array, *chain;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ f2 = mock_fence();
+ if (!f2) {
+ dma_fence_put(f1);
+ return -ENOMEM;
+ }
+
+ array = mock_array(2, f1, f2);
+ if (!array)
+ return -ENOMEM;
+
+ chain = mock_chain(NULL, array);
+ if (!chain)
+ return -ENOMEM;
+
+ dma_fence_unwrap_for_each(fence, &iter, chain) {
+ if (fence == f1) {
+ f1 = NULL;
+ } else if (fence == f2) {
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_signal(f1);
+ dma_fence_signal(f2);
+ dma_fence_put(chain);
+ return err;
+}
+
+int dma_fence_unwrap(void)
+{
+ static const struct subtest tests[] = {
+ SUBTEST(sanitycheck),
+ SUBTEST(unwrap_array),
+ SUBTEST(unwrap_chain),
+ SUBTEST(unwrap_chain_array),
+ };
+
+ return subtests(tests, NULL);
+}
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 394e6e1e9686..514d213261df 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -5,6 +5,7 @@
* Copyright (C) 2012 Google, Inc.
*/
+#include <linux/dma-fence-unwrap.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
@@ -172,20 +173,6 @@ static int sync_file_set_fence(struct sync_file *sync_file,
return 0;
}
-static struct dma_fence **get_fences(struct sync_file *sync_file,
- int *num_fences)
-{
- if (dma_fence_is_array(sync_file->fence)) {
- struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
-
- *num_fences = array->num_fences;
- return array->fences;
- }
-
- *num_fences = 1;
- return &sync_file->fence;
-}
-
static void add_fence(struct dma_fence **fences,
int *i, struct dma_fence *fence)
{
@@ -210,86 +197,97 @@ static void add_fence(struct dma_fence **fences,
static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
+ struct dma_fence *a_fence, *b_fence, **fences;
+ struct dma_fence_unwrap a_iter, b_iter;
+ unsigned int index, num_fences;
struct sync_file *sync_file;
- struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences;
- int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences;
sync_file = sync_file_alloc();
if (!sync_file)
return NULL;
- a_fences = get_fences(a, &a_num_fences);
- b_fences = get_fences(b, &b_num_fences);
- if (a_num_fences > INT_MAX - b_num_fences)
- goto err;
+ num_fences = 0;
+ dma_fence_unwrap_for_each(a_fence, &a_iter, a->fence)
+ ++num_fences;
+ dma_fence_unwrap_for_each(b_fence, &b_iter, b->fence)
+ ++num_fences;
- num_fences = a_num_fences + b_num_fences;
+ if (num_fences > INT_MAX)
+ goto err_free_sync_file;
fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
if (!fences)
- goto err;
+ goto err_free_sync_file;
/*
- * Assume sync_file a and b are both ordered and have no
- * duplicates with the same context.
+ * We can't guarantee that fences in both a and b are ordered, but it is
+ * still quite likely.
*
- * If a sync_file can only be created with sync_file_merge
- * and sync_file_create, this is a reasonable assumption.
+ * So attempt to order the fences as we pass over them and merge fences
+ * with the same context.
*/
- for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
- struct dma_fence *pt_a = a_fences[i_a];
- struct dma_fence *pt_b = b_fences[i_b];
- if (pt_a->context < pt_b->context) {
- add_fence(fences, &i, pt_a);
+ index = 0;
+ for (a_fence = dma_fence_unwrap_first(a->fence, &a_iter),
+ b_fence = dma_fence_unwrap_first(b->fence, &b_iter);
+ a_fence || b_fence; ) {
+
+ if (!b_fence) {
+ add_fence(fences, &index, a_fence);
+ a_fence = dma_fence_unwrap_next(&a_iter);
+
+ } else if (!a_fence) {
+ add_fence(fences, &index, b_fence);
+ b_fence = dma_fence_unwrap_next(&b_iter);
+
+ } else if (a_fence->context < b_fence->context) {
+ add_fence(fences, &index, a_fence);
+ a_fence = dma_fence_unwrap_next(&a_iter);
- i_a++;
- } else if (pt_a->context > pt_b->context) {
- add_fence(fences, &i, pt_b);
+ } else if (b_fence->context < a_fence->context) {
+ add_fence(fences, &index, b_fence);
+ b_fence = dma_fence_unwrap_next(&b_iter);
+
+ } else if (__dma_fence_is_later(a_fence->seqno, b_fence->seqno,
+ a_fence->ops)) {
+ add_fence(fences, &index, a_fence);
+ a_fence = dma_fence_unwrap_next(&a_iter);
+ b_fence = dma_fence_unwrap_next(&b_iter);
- i_b++;
} else {
- if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno,
- pt_a->ops))
- add_fence(fences, &i, pt_a);
- else
- add_fence(fences, &i, pt_b);
-
- i_a++;
- i_b++;
+ add_fence(fences, &index, b_fence);
+ a_fence = dma_fence_unwrap_next(&a_iter);
+ b_fence = dma_fence_unwrap_next(&b_iter);
}
}
- for (; i_a < a_num_fences; i_a++)
- add_fence(fences, &i, a_fences[i_a]);
-
- for (; i_b < b_num_fences; i_b++)
- add_fence(fences, &i, b_fences[i_b]);
-
- if (i == 0)
- fences[i++] = dma_fence_get(a_fences[0]);
+ if (index == 0)
+ fences[index++] = dma_fence_get_stub();
- if (num_fences > i) {
- nfences = krealloc_array(fences, i, sizeof(*fences), GFP_KERNEL);
- if (!nfences)
- goto err;
+ if (num_fences > index) {
+ struct dma_fence **tmp;
- fences = nfences;
+ /* Keep going even when reducing the size failed */
+ tmp = krealloc_array(fences, index, sizeof(*fences),
+ GFP_KERNEL);
+ if (tmp)
+ fences = tmp;
}
- if (sync_file_set_fence(sync_file, fences, i) < 0)
- goto err;
+ if (sync_file_set_fence(sync_file, fences, index) < 0)
+ goto err_put_fences;
strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
return sync_file;
-err:
- while (i)
- dma_fence_put(fences[--i]);
+err_put_fences:
+ while (index)
+ dma_fence_put(fences[--index]);
kfree(fences);
+
+err_free_sync_file:
fput(sync_file->file);
return NULL;
-
}
static int sync_file_release(struct inode *inode, struct file *file)
@@ -398,11 +396,13 @@ static int sync_fill_fence_info(struct dma_fence *fence,
static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
unsigned long arg)
{
- struct sync_file_info info;
struct sync_fence_info *fence_info = NULL;
- struct dma_fence **fences;
+ struct dma_fence_unwrap iter;
+ struct sync_file_info info;
+ unsigned int num_fences;
+ struct dma_fence *fence;
+ int ret;
__u32 size;
- int num_fences, ret, i;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
return -EFAULT;
@@ -410,7 +410,9 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (info.flags || info.pad)
return -EINVAL;
- fences = get_fences(sync_file, &num_fences);
+ num_fences = 0;
+ dma_fence_unwrap_for_each(fence, &iter, sync_file->fence)
+ ++num_fences;
/*
* Passing num_fences = 0 means that userspace doesn't want to
@@ -433,8 +435,11 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (!fence_info)
return -ENOMEM;
- for (i = 0; i < num_fences; i++) {
- int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+ num_fences = 0;
+ dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) {
+ int status;
+
+ status = sync_fill_fence_info(fence, &fence_info[num_fences++]);
info.status = info.status <= 0 ? info.status : status;
}
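
The rewritten merge walks both unwrapped fence streams with two cursors, ordering by context and collapsing context ties to the later seqno. A self-contained userspace model of that logic (stand-in struct, not the kernel types):

#include <stdio.h>

struct f { unsigned long ctx, seq; };

static void merge(const struct f *a, int na, const struct f *b, int nb)
{
	int ia = 0, ib = 0;

	while (ia < na || ib < nb) {
		if (ib == nb || (ia < na && a[ia].ctx < b[ib].ctx)) {
			printf("ctx %lu seq %lu\n", a[ia].ctx, a[ia].seq);
			ia++;
		} else if (ia == na || b[ib].ctx < a[ia].ctx) {
			printf("ctx %lu seq %lu\n", b[ib].ctx, b[ib].seq);
			ib++;
		} else {
			/* same context: keep only the later seqno */
			const struct f *w = a[ia].seq > b[ib].seq ?
					    &a[ia] : &b[ib];

			printf("ctx %lu seq %lu\n", w->ctx, w->seq);
			ia++;
			ib++;
		}
	}
}

int main(void)
{
	const struct f a[] = { { 1, 5 }, { 3, 2 } };
	const struct f b[] = { { 1, 7 }, { 2, 1 } };

	merge(a, 2, b, 2);	/* prints ctx 1/7, ctx 2/1, ctx 3/2 */
	return 0;
}
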
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 1476156af74b..def564d1e8fa 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1453,7 +1453,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
- struct at_xdmac_desc *desc, *_desc;
+ struct at_xdmac_desc *desc, *_desc, *iter;
struct list_head *descs_list;
enum dma_status ret;
int residue, retry;
@@ -1568,11 +1568,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
* microblock.
*/
descs_list = &desc->descs_list;
- list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
- dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
- residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
- if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+ list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
+ dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
+ residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
+ if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
+ desc = iter;
break;
+ }
}
residue += cur_ubc << dwidth;
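
The bug class fixed above: list_for_each_entry_safe() overwrites its cursor on every iteration, so code that needs the matched element afterwards must keep the result in a separate pointer and only assign it on a hit. A sketch with hypothetical types:

static struct item *find_item(struct list_head *head)
{
	struct item *found = NULL, *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, head, node) {
		if (is_match(iter)) {
			found = iter;	/* keep the hit; iter keeps moving */
			break;
		}
	}

	/* NULL when nothing matched, never a stale loop cursor. */
	return found;
}
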
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 329fc2e57b70..33bc1e6c4cf2 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -414,14 +414,18 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list */
+
#ifdef CONFIG_64BIT
- SET_CH_64(dw, chan->dir, chan->id, llp.reg,
- chunk->ll_region.paddr);
+ /* llp is not aligned on 64bit -> keep 32bit accesses */
+ SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+ lower_32_bits(chunk->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+ upper_32_bits(chunk->ll_region.paddr));
#else /* CONFIG_64BIT */
- SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
- lower_32_bits(chunk->ll_region.paddr));
- SET_CH_32(dw, chan->dir, chan->id, llp.msb,
- upper_32_bits(chunk->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+ lower_32_bits(chunk->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+ upper_32_bits(chunk->ll_region.paddr));
#endif /* CONFIG_64BIT */
}
/* Doorbell */
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 3061fe857d69..f652da6ab47d 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -373,7 +373,6 @@ static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
{
lockdep_assert_held(&wq->wq_lock);
- idxd_wq_disable_cleanup(wq);
wq->size = 0;
wq->group = NULL;
}
@@ -701,14 +700,17 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
if (wq->state == IDXD_WQ_ENABLED) {
idxd_wq_disable_cleanup(wq);
- idxd_wq_device_reset_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
}
+ idxd_wq_device_reset_cleanup(wq);
}
}
void idxd_device_clear_state(struct idxd_device *idxd)
{
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return;
+
idxd_groups_clear_state(idxd);
idxd_engines_clear_state(idxd);
idxd_device_wqs_clear_state(idxd);
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index e289fd48711a..c01db23e3333 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -150,14 +150,15 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
*/
int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc)
{
- int rc, retries = 0;
+ unsigned int retries = wq->enqcmds_retries;
+ int rc;
do {
rc = enqcmds(portal, desc);
if (rc == 0)
break;
cpu_relax();
- } while (retries++ < wq->enqcmds_retries);
+ } while (retries--);
return rc;
}
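
The rewritten loop preserves the attempt count of the old form: with retries initialized to wq->enqcmds_retries, "do { ... } while (retries--)" makes exactly enqcmds_retries + 1 attempts. A quick runnable check:

#include <stdio.h>

int main(void)
{
	unsigned int retries = 3, attempts = 0;

	do {
		attempts++;
	} while (retries--);
	printf("attempts = %u\n", attempts);	/* prints 4 */
	return 0;
}
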
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 7e19ab92b61a..dfd549685c46 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -905,6 +905,9 @@ static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attr
u64 xfer_size;
int rc;
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
@@ -939,6 +942,9 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
u64 batch_size;
int rc;
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 70c0aa931ddf..6196a7b3956b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -198,12 +198,12 @@ struct sdma_script_start_addrs {
s32 per_2_firi_addr;
s32 mcu_2_firi_addr;
s32 uart_2_per_addr;
- s32 uart_2_mcu_ram_addr;
+ s32 uart_2_mcu_addr;
s32 per_2_app_addr;
s32 mcu_2_app_addr;
s32 per_2_per_addr;
s32 uartsh_2_per_addr;
- s32 uartsh_2_mcu_ram_addr;
+ s32 uartsh_2_mcu_addr;
s32 per_2_shp_addr;
s32 mcu_2_shp_addr;
s32 ata_2_mcu_addr;
@@ -232,8 +232,8 @@ struct sdma_script_start_addrs {
s32 mcu_2_ecspi_addr;
s32 mcu_2_sai_addr;
s32 sai_2_mcu_addr;
- s32 uart_2_mcu_addr;
- s32 uartsh_2_mcu_addr;
+ s32 uart_2_mcu_rom_addr;
+ s32 uartsh_2_mcu_rom_addr;
/* End of v3 array */
s32 mcu_2_zqspi_addr;
/* End of v4 array */
@@ -1796,17 +1796,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
saddr_arr[i] = addr_arr[i];
/*
- * get uart_2_mcu_addr/uartsh_2_mcu_addr rom script specially because
- * they are now replaced by uart_2_mcu_ram_addr/uartsh_2_mcu_ram_addr
- * to be compatible with legacy freescale/nxp sdma firmware, and they
- * are located in the bottom part of sdma_script_start_addrs which are
- * beyond the SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1.
+ * For compatibility with NXP internal legacy kernel before 4.19 which
+ * is based on uart ram script and mainline kernel based on uart rom
+ * script, both uart ram/rom scripts are present in newer sdma
+ * firmware. Use the rom versions if they are present (V3 or newer).
*/
- if (addr->uart_2_mcu_addr)
- sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_addr;
- if (addr->uartsh_2_mcu_addr)
- sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_addr;
-
+ if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
+ if (addr->uart_2_mcu_rom_addr)
+ sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
+ if (addr->uartsh_2_mcu_rom_addr)
+ sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
+ }
}
static void sdma_load_firmware(const struct firmware *fw, void *context)
@@ -1885,7 +1885,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
u32 reg, val, shift, num_map, i;
int ret = 0;
- if (IS_ERR(np) || IS_ERR(gpr_np))
+ if (IS_ERR(np) || !gpr_np)
goto out;
event_remap = of_find_property(np, propname, NULL);
@@ -1933,7 +1933,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
}
out:
- if (!IS_ERR(gpr_np))
+ if (gpr_np)
of_node_put(gpr_np);
return ret;
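
The two-line fix at the end corrects a return-value mismatch: OF phandle lookups return NULL on failure rather than an ERR_PTR, so both the error test and the cleanup must be plain NULL checks. A sketch (the "gpr" property name is for illustration):

static int remap_events_sketch(struct device_node *np)
{
	struct device_node *gpr_np;

	gpr_np = of_parse_phandle(np, "gpr", 0);
	if (!gpr_np)		/* NULL on failure, never an ERR_PTR */
		return 0;

	/* ...program the GPR register here... */

	of_node_put(gpr_np);	/* balance the reference from the lookup */
	return 0;
}
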
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 375e7e647df6..a1517ef1f4a0 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
unsigned int status;
int ret;
- ret = pm_runtime_get_sync(mtkd->ddev.dev);
+ ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
if (ret < 0) {
pm_runtime_put_noidle(chan->device->dev);
return ret;
@@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
ret = readx_poll_timeout(readl, c->base + VFF_EN,
status, !status, 10, 100);
if (ret)
- return ret;
+ goto err_pm;
ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
if (ret < 0) {
dev_err(chan->device->dev, "Can't request dma IRQ\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_pm;
}
if (mtkd->support_33bits)
mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
+err_pm:
+ pm_runtime_put_noidle(mtkd->ddev.dev);
return ret;
}
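
The switch to pm_runtime_resume_and_get() matters because that helper already drops the usage count when resume fails, and the new err_pm label gives later failure paths a single place to release the reference. One common shape of this pattern (hypothetical helper names, not a line-for-line copy of the driver):

static int my_alloc_chan_resources(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* helper already dropped the usage count */

	ret = my_hw_init(dev);
	if (ret)
		goto err_pm;

	return 0;

err_pm:
	pm_runtime_put_noidle(dev);
	return ret;
}
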
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 58ab63642e72..d3e2477948c8 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -55,6 +55,7 @@ config EDAC_DECODE_MCE
config EDAC_GHES
bool "Output ACPI APEI/GHES BIOS detected errors via EDAC"
depends on ACPI_APEI_GHES && (EDAC=y)
+ select UEFI_CPER
help
Not all machines support hardware-driven error report. Some of those
provide a BIOS-driven error report mechanism via ACPI, using the
@@ -484,7 +485,7 @@ config EDAC_ARMADA_XP
config EDAC_SYNOPSYS
tristate "Synopsys DDR Memory Controller"
- depends on ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA
+ depends on ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA || ARCH_MXC
help
Support for error detection and correction on the Synopsys DDR
memory controller.
diff --git a/drivers/edac/armada_xp_edac.c b/drivers/edac/armada_xp_edac.c
index b1f46a974b9e..038abbb83f4b 100644
--- a/drivers/edac/armada_xp_edac.c
+++ b/drivers/edac/armada_xp_edac.c
@@ -286,17 +286,10 @@ static int axp_mc_probe(struct platform_device *pdev)
struct edac_mc_layer layers[1];
const struct of_device_id *id;
struct mem_ctl_info *mci;
- struct resource *r;
void __iomem *base;
uint32_t config;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "Unable to get mem resource\n");
- return -ENODEV;
- }
-
- base = devm_ioremap_resource(&pdev->dev, r);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(&pdev->dev, "Unable to map regs\n");
return PTR_ERR(base);
@@ -516,15 +509,8 @@ static int aurora_l2_probe(struct platform_device *pdev)
const struct of_device_id *id;
uint32_t l2x0_aux_ctrl;
void __iomem *base;
- struct resource *r;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "Unable to get mem resource\n");
- return -ENODEV;
- }
- base = devm_ioremap_resource(&pdev->dev, r);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(&pdev->dev, "Unable to map regs\n");
return PTR_ERR(base);
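
Note: devm_platform_ioremap_resource() folds the platform_get_resource()/devm_ioremap_resource() pair deleted above into one call, and devm_ioremap_resource() already logs failures, so the remaining dev_err() is redundant but harmless. A sketch of typical probe usage:

    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
    	void __iomem *base;

    	/* Looks up IORESOURCE_MEM index 0 and maps it; the core
    	 * prints its own error message on failure. */
    	base = devm_platform_ioremap_resource(pdev, 0);
    	if (IS_ERR(base))
    		return PTR_ERR(base);

    	return 0;
    }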
diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c
index b8a7d9594afd..1fa5ca57e9ec 100644
--- a/drivers/edac/dmc520_edac.c
+++ b/drivers/edac/dmc520_edac.c
@@ -489,7 +489,7 @@ static int dmc520_edac_probe(struct platform_device *pdev)
dev = &pdev->dev;
for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
- irq = platform_get_irq_byname(pdev, dmc520_irq_configs[idx].name);
+ irq = platform_get_irq_byname_optional(pdev, dmc520_irq_configs[idx].name);
irqs[idx] = irq;
masks[idx] = dmc520_irq_configs[idx].mask;
if (irq >= 0) {
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 8c4d947fb848..19522c568aa5 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -47,99 +47,67 @@ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
}
#endif /* CONFIG_EDAC_DEBUG */
-struct edac_device_ctl_info *edac_device_alloc_ctl_info(
- unsigned sz_private,
- char *edac_device_name, unsigned nr_instances,
- char *edac_block_name, unsigned nr_blocks,
- unsigned offset_value, /* zero, 1, or other based offset */
- struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
- int device_index)
+/*
+ * @off_val: zero-, one-, or other-based offset
+ */
+struct edac_device_ctl_info *
+edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instances,
+ char *blk_name, unsigned nr_blocks, unsigned off_val,
+ struct edac_dev_sysfs_block_attribute *attrib_spec,
+ unsigned nr_attrib, int device_index)
{
- struct edac_device_ctl_info *dev_ctl;
- struct edac_device_instance *dev_inst, *inst;
- struct edac_device_block *dev_blk, *blk_p, *blk;
struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
- unsigned total_size;
- unsigned count;
+ struct edac_device_block *dev_blk, *blk_p, *blk;
+ struct edac_device_instance *dev_inst, *inst;
+ struct edac_device_ctl_info *dev_ctl;
unsigned instance, block, attr;
- void *pvt, *p;
+ void *pvt;
int err;
edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks);
- /* Calculate the size of memory we need to allocate AND
- * determine the offsets of the various item arrays
- * (instance,block,attrib) from the start of an allocated structure.
- * We want the alignment of each item (instance,block,attrib)
- * to be at least as stringent as what the compiler would
- * provide if we could simply hardcode everything into a single struct.
- */
- p = NULL;
- dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);
+ dev_ctl = kzalloc(sizeof(struct edac_device_ctl_info), GFP_KERNEL);
+ if (!dev_ctl)
+ return NULL;
- /* Calc the 'end' offset past end of ONE ctl_info structure
- * which will become the start of the 'instance' array
- */
- dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);
+ dev_inst = kcalloc(nr_instances, sizeof(struct edac_device_instance), GFP_KERNEL);
+ if (!dev_inst)
+ goto free;
- /* Calc the 'end' offset past the instance array within the ctl_info
- * which will become the start of the block array
- */
- count = nr_instances * nr_blocks;
- dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);
+ dev_ctl->instances = dev_inst;
- /* Calc the 'end' offset past the dev_blk array
- * which will become the start of the attrib array, if any.
- */
- /* calc how many nr_attrib we need */
- if (nr_attrib > 0)
- count *= nr_attrib;
- dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);
+ dev_blk = kcalloc(nr_instances * nr_blocks, sizeof(struct edac_device_block), GFP_KERNEL);
+ if (!dev_blk)
+ goto free;
- /* Calc the 'end' offset past the attributes array */
- pvt = edac_align_ptr(&p, sz_private, 1);
+ dev_ctl->blocks = dev_blk;
- /* 'pvt' now points to where the private data area is.
- * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
- * is baselined at ZERO
- */
- total_size = ((unsigned long)pvt) + sz_private;
+ if (nr_attrib) {
+ dev_attrib = kcalloc(nr_attrib, sizeof(struct edac_dev_sysfs_block_attribute),
+ GFP_KERNEL);
+ if (!dev_attrib)
+ goto free;
- /* Allocate the amount of memory for the set of control structures */
- dev_ctl = kzalloc(total_size, GFP_KERNEL);
- if (dev_ctl == NULL)
- return NULL;
+ dev_ctl->attribs = dev_attrib;
+ }
- /* Adjust pointers so they point within the actual memory we
- * just allocated rather than an imaginary chunk of memory
- * located at address 0.
- * 'dev_ctl' points to REAL memory, while the others are
- * ZERO based and thus need to be adjusted to point within
- * the allocated memory.
- */
- dev_inst = (struct edac_device_instance *)
- (((char *)dev_ctl) + ((unsigned long)dev_inst));
- dev_blk = (struct edac_device_block *)
- (((char *)dev_ctl) + ((unsigned long)dev_blk));
- dev_attrib = (struct edac_dev_sysfs_block_attribute *)
- (((char *)dev_ctl) + ((unsigned long)dev_attrib));
- pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;
-
- /* Begin storing the information into the control info structure */
- dev_ctl->dev_idx = device_index;
- dev_ctl->nr_instances = nr_instances;
- dev_ctl->instances = dev_inst;
- dev_ctl->pvt_info = pvt;
+ if (pvt_sz) {
+ pvt = kzalloc(pvt_sz, GFP_KERNEL);
+ if (!pvt)
+ goto free;
+
+ dev_ctl->pvt_info = pvt;
+ }
+
+ dev_ctl->dev_idx = device_index;
+ dev_ctl->nr_instances = nr_instances;
/* Default logging of CEs and UEs */
dev_ctl->log_ce = 1;
dev_ctl->log_ue = 1;
/* Name of this edac device */
- snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name);
-
- edac_dbg(4, "edac_dev=%p next after end=%p\n",
- dev_ctl, pvt + sz_private);
+	snprintf(dev_ctl->name, sizeof(dev_ctl->name), "%s", dev_name);
/* Initialize every Instance */
for (instance = 0; instance < nr_instances; instance++) {
@@ -150,15 +118,14 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
inst->blocks = blk_p;
/* name of this instance */
- snprintf(inst->name, sizeof(inst->name),
- "%s%u", edac_device_name, instance);
+ snprintf(inst->name, sizeof(inst->name), "%s%u", dev_name, instance);
/* Initialize every block in each instance */
for (block = 0; block < nr_blocks; block++) {
blk = &blk_p[block];
blk->instance = inst;
snprintf(blk->name, sizeof(blk->name),
- "%s%d", edac_block_name, block+offset_value);
+ "%s%d", blk_name, block + off_val);
edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
instance, inst, block, blk, blk->name);
@@ -210,10 +177,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
* Initialize the 'root' kobj for the edac_device controller
*/
err = edac_device_register_sysfs_main_kobj(dev_ctl);
- if (err) {
- kfree(dev_ctl);
- return NULL;
- }
+ if (err)
+ goto free;
/* at this point, the root kobj is valid, and in order to
* 'free' the object, then the function:
@@ -223,6 +188,11 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
*/
return dev_ctl;
+
+free:
+ __edac_device_free_ctl_info(dev_ctl);
+
+ return NULL;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
diff --git a/drivers/edac/edac_device.h b/drivers/edac/edac_device.h
index fc2d2c218064..3f44e6b9d387 100644
--- a/drivers/edac/edac_device.h
+++ b/drivers/edac/edac_device.h
@@ -216,6 +216,8 @@ struct edac_device_ctl_info {
*/
u32 nr_instances;
struct edac_device_instance *instances;
+ struct edac_device_block *blocks;
+ struct edac_dev_sysfs_block_attribute *attribs;
/* Event counters for the this whole EDAC Device */
struct edac_device_counter counters;
@@ -348,4 +350,16 @@ edac_device_handle_ue(struct edac_device_ctl_info *edac_dev, int inst_nr,
*/
extern int edac_device_alloc_index(void);
extern const char *edac_layer_name[];
+
+/* Free the actual struct */
+static inline void __edac_device_free_ctl_info(struct edac_device_ctl_info *ci)
+{
+ if (ci) {
+ kfree(ci->pvt_info);
+ kfree(ci->attribs);
+ kfree(ci->blocks);
+ kfree(ci->instances);
+ kfree(ci);
+ }
+}
#endif
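
Note: because kfree(NULL) is a no-op and the control structure is zero-initialized, this one helper is safe to call at any point of a partially failed allocation, which is what lets edac_device_alloc_ctl_info() funnel every error path through a single free: label. A reduced sketch of the idiom with an illustrative struct:

    #include <linux/slab.h>

    struct foo {
    	int *a;
    	void *b;
    };

    static void foo_free(struct foo *f)
    {
    	if (f) {
    		kfree(f->b);	/* kfree(NULL) is a no-op */
    		kfree(f->a);
    		kfree(f);
    	}
    }

    static struct foo *foo_alloc(void)
    {
    	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

    	if (!f)
    		return NULL;

    	f->a = kcalloc(4, sizeof(*f->a), GFP_KERNEL);
    	if (!f->a)
    		goto free;

    	f->b = kzalloc(16, GFP_KERNEL);
    	if (!f->b)
    		goto free;

    	return f;

    free:
    	foo_free(f);	/* frees whatever was set; the rest is NULL */
    	return NULL;
    }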
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 9a61d92bdf42..ac678b4a21fc 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -208,10 +208,7 @@ static void edac_device_ctrl_master_release(struct kobject *kobj)
/* decrement the EDAC CORE module ref count */
module_put(edac_dev->owner);
- /* free the control struct containing the 'main' kobj
- * passed in to this routine
- */
- kfree(edac_dev);
+ __edac_device_free_ctl_info(edac_dev);
}
/* ktype for the main (master) kobject */
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d2715774af6f..eb58644bb019 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -170,61 +170,6 @@ const char * const edac_mem_types[] = {
};
EXPORT_SYMBOL_GPL(edac_mem_types);
-/**
- * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
- * @p: pointer to a pointer with the memory offset to be used. At
- * return, this will be incremented to point to the next offset
- * @size: Size of the data structure to be reserved
- * @n_elems: Number of elements that should be reserved
- *
- * If 'size' is a constant, the compiler will optimize this whole function
- * down to either a no-op or the addition of a constant to the value of '*p'.
- *
- * The 'p' pointer is absolutely needed to keep the proper advancing
- * further in memory to the proper offsets when allocating the struct along
- * with its embedded structs, as edac_device_alloc_ctl_info() does it
- * above, for example.
- *
- * At return, the pointer 'p' will be incremented to be used on a next call
- * to this function.
- */
-void *edac_align_ptr(void **p, unsigned int size, int n_elems)
-{
- unsigned int align, r;
- void *ptr = *p;
-
- *p += size * n_elems;
-
- /*
- * 'p' can possibly be an unaligned item X such that sizeof(X) is
- * 'size'. Adjust 'p' so that its alignment is at least as
- * stringent as what the compiler would provide for X and return
- * the aligned result.
- * Here we assume that the alignment of a "long long" is the most
- * stringent alignment that the compiler will ever provide by default.
- * As far as I know, this is a reasonable assumption.
- */
- if (size > sizeof(long))
- align = sizeof(long long);
- else if (size > sizeof(int))
- align = sizeof(long);
- else if (size > sizeof(short))
- align = sizeof(int);
- else if (size > sizeof(char))
- align = sizeof(short);
- else
- return ptr;
-
- r = (unsigned long)ptr % align;
-
- if (r == 0)
- return ptr;
-
- *p += align - r;
-
- return (void *)(((unsigned long)ptr) + align - r);
-}
-
static void _edac_mc_free(struct mem_ctl_info *mci)
{
put_device(&mci->dev);
@@ -257,6 +202,8 @@ static void mci_release(struct device *dev)
}
kfree(mci->csrows);
}
+ kfree(mci->pvt_info);
+ kfree(mci->layers);
kfree(mci);
}
@@ -392,9 +339,8 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
{
struct mem_ctl_info *mci;
struct edac_mc_layer *layer;
- unsigned int idx, size, tot_dimms = 1;
+ unsigned int idx, tot_dimms = 1;
unsigned int tot_csrows = 1, tot_channels = 1;
- void *pvt, *ptr = NULL;
bool per_rank = false;
if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0))
@@ -416,41 +362,25 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
per_rank = true;
}
- /* Figure out the offsets of the various items from the start of an mc
- * structure. We want the alignment of each item to be at least as
- * stringent as what the compiler would provide if we could simply
- * hardcode everything into a single struct.
- */
- mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
- layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
- pvt = edac_align_ptr(&ptr, sz_pvt, 1);
- size = ((unsigned long)pvt) + sz_pvt;
-
- edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
- size,
- tot_dimms,
- per_rank ? "ranks" : "dimms",
- tot_csrows * tot_channels);
-
- mci = kzalloc(size, GFP_KERNEL);
- if (mci == NULL)
+ mci = kzalloc(sizeof(struct mem_ctl_info), GFP_KERNEL);
+ if (!mci)
return NULL;
+ mci->layers = kcalloc(n_layers, sizeof(struct edac_mc_layer), GFP_KERNEL);
+ if (!mci->layers)
+ goto error;
+
+ mci->pvt_info = kzalloc(sz_pvt, GFP_KERNEL);
+ if (!mci->pvt_info)
+ goto error;
+
mci->dev.release = mci_release;
device_initialize(&mci->dev);
- /* Adjust pointers so they point within the memory we just allocated
- * rather than an imaginary chunk of memory located at address 0.
- */
- layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
- pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
-
/* setup index and various internal pointers */
mci->mc_idx = mc_num;
mci->tot_dimms = tot_dimms;
- mci->pvt_info = pvt;
mci->n_layers = n_layers;
- mci->layers = layer;
memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
mci->nr_csrows = tot_csrows;
mci->num_cschannel = tot_channels;
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index aa1f91688eb8..96f6de0c8ff6 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -59,8 +59,6 @@ extern void edac_device_reset_delay_period(struct edac_device_ctl_info
*edac_dev, unsigned long value);
extern void edac_mc_reset_delay_period(unsigned long value);
-extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
-
/*
* EDAC debugfs functions
*/
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 48c844a72a27..2205d7e731db 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -29,32 +29,31 @@ static LIST_HEAD(edac_pci_list);
static atomic_t pci_indexes = ATOMIC_INIT(0);
struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
- const char *edac_pci_name)
+ const char *edac_pci_name)
{
struct edac_pci_ctl_info *pci;
- void *p = NULL, *pvt;
- unsigned int size;
edac_dbg(1, "\n");
- pci = edac_align_ptr(&p, sizeof(*pci), 1);
- pvt = edac_align_ptr(&p, 1, sz_pvt);
- size = ((unsigned long)pvt) + sz_pvt;
-
- /* Alloc the needed control struct memory */
- pci = kzalloc(size, GFP_KERNEL);
- if (pci == NULL)
+ pci = kzalloc(sizeof(struct edac_pci_ctl_info), GFP_KERNEL);
+ if (!pci)
return NULL;
- /* Now much private space */
- pvt = sz_pvt ? ((char *)pci) + ((unsigned long)pvt) : NULL;
+ if (sz_pvt) {
+ pci->pvt_info = kzalloc(sz_pvt, GFP_KERNEL);
+ if (!pci->pvt_info)
+ goto free;
+ }
- pci->pvt_info = pvt;
pci->op_state = OP_ALLOC;
snprintf(pci->name, strlen(edac_pci_name) + 1, "%s", edac_pci_name);
return pci;
+
+free:
+ kfree(pci);
+ return NULL;
}
EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 6d1ddecbf0da..59b0bedc9c24 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -15,11 +15,13 @@
#include "edac_module.h"
#include <ras/ras_event.h>
+#define OTHER_DETAIL_LEN 400
+
struct ghes_pvt {
struct mem_ctl_info *mci;
/* Buffers for the error handling routine */
- char other_detail[400];
+ char other_detail[OTHER_DETAIL_LEN];
char msg[80];
};
@@ -36,7 +38,7 @@ static struct ghes_pvt *ghes_pvt;
* This driver's representation of the system hardware, as collected
* from DMI.
*/
-struct ghes_hw_desc {
+static struct ghes_hw_desc {
int num_dimms;
struct dimm_info *dimms;
} ghes_hw;
@@ -235,8 +237,34 @@ static void ghes_scan_system(void)
system_scanned = true;
}
+static int print_mem_error_other_detail(const struct cper_sec_mem_err *mem, char *msg,
+ const char *location, unsigned int len)
+{
+ u32 n;
+
+ if (!msg)
+ return 0;
+
+ n = 0;
+ len -= 1;
+
+ n += scnprintf(msg + n, len - n, "APEI location: %s ", location);
+
+ if (!(mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS))
+ goto out;
+
+ n += scnprintf(msg + n, len - n, "status(0x%016llx): ", mem->error_status);
+ n += scnprintf(msg + n, len - n, "%s ", cper_mem_err_status_str(mem->error_status));
+
+out:
+ msg[n] = '\0';
+
+ return n;
+}
+
void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
{
+ struct cper_mem_err_compact cmem;
struct edac_raw_error_desc *e;
struct mem_ctl_info *mci;
struct ghes_pvt *pvt;
@@ -292,60 +320,10 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
/* Error type, mapped on e->msg */
if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
+ u8 etype = mem_err->error_type;
+
p = pvt->msg;
- switch (mem_err->error_type) {
- case 0:
- p += sprintf(p, "Unknown");
- break;
- case 1:
- p += sprintf(p, "No error");
- break;
- case 2:
- p += sprintf(p, "Single-bit ECC");
- break;
- case 3:
- p += sprintf(p, "Multi-bit ECC");
- break;
- case 4:
- p += sprintf(p, "Single-symbol ChipKill ECC");
- break;
- case 5:
- p += sprintf(p, "Multi-symbol ChipKill ECC");
- break;
- case 6:
- p += sprintf(p, "Master abort");
- break;
- case 7:
- p += sprintf(p, "Target abort");
- break;
- case 8:
- p += sprintf(p, "Parity Error");
- break;
- case 9:
- p += sprintf(p, "Watchdog timeout");
- break;
- case 10:
- p += sprintf(p, "Invalid address");
- break;
- case 11:
- p += sprintf(p, "Mirror Broken");
- break;
- case 12:
- p += sprintf(p, "Memory Sparing");
- break;
- case 13:
- p += sprintf(p, "Scrub corrected error");
- break;
- case 14:
- p += sprintf(p, "Scrub uncorrected error");
- break;
- case 15:
- p += sprintf(p, "Physical Memory Map-out event");
- break;
- default:
- p += sprintf(p, "reserved error (%d)",
- mem_err->error_type);
- }
+ p += snprintf(p, sizeof(pvt->msg), "%s", cper_mem_err_type_str(etype));
} else {
strcpy(pvt->msg, "unknown error");
}
@@ -362,52 +340,19 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
/* Memory error location, mapped on e->location */
p = e->location;
- if (mem_err->validation_bits & CPER_MEM_VALID_NODE)
- p += sprintf(p, "node:%d ", mem_err->node);
- if (mem_err->validation_bits & CPER_MEM_VALID_CARD)
- p += sprintf(p, "card:%d ", mem_err->card);
- if (mem_err->validation_bits & CPER_MEM_VALID_MODULE)
- p += sprintf(p, "module:%d ", mem_err->module);
- if (mem_err->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
- p += sprintf(p, "rank:%d ", mem_err->rank);
- if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
- p += sprintf(p, "bank:%d ", mem_err->bank);
- if (mem_err->validation_bits & CPER_MEM_VALID_BANK_GROUP)
- p += sprintf(p, "bank_group:%d ",
- mem_err->bank >> CPER_MEM_BANK_GROUP_SHIFT);
- if (mem_err->validation_bits & CPER_MEM_VALID_BANK_ADDRESS)
- p += sprintf(p, "bank_address:%d ",
- mem_err->bank & CPER_MEM_BANK_ADDRESS_MASK);
- if (mem_err->validation_bits & (CPER_MEM_VALID_ROW | CPER_MEM_VALID_ROW_EXT)) {
- u32 row = mem_err->row;
-
- row |= cper_get_mem_extension(mem_err->validation_bits, mem_err->extended);
- p += sprintf(p, "row:%d ", row);
- }
- if (mem_err->validation_bits & CPER_MEM_VALID_COLUMN)
- p += sprintf(p, "col:%d ", mem_err->column);
- if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
- p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
+ cper_mem_err_pack(mem_err, &cmem);
+ p += cper_mem_err_location(&cmem, p);
+
if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
- const char *bank = NULL, *device = NULL;
struct dimm_info *dimm;
- dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);
- if (bank != NULL && device != NULL)
- p += sprintf(p, "DIMM location:%s %s ", bank, device);
- else
- p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
- mem_err->mem_dev_handle);
-
+ p += cper_dimm_err_location(&cmem, p);
dimm = find_dimm_by_handle(mci, mem_err->mem_dev_handle);
if (dimm) {
e->top_layer = dimm->idx;
strcpy(e->label, dimm->label);
}
}
- if (mem_err->validation_bits & CPER_MEM_VALID_CHIP_ID)
- p += sprintf(p, "chipID: %d ",
- mem_err->extended >> CPER_MEM_CHIP_ID_SHIFT);
if (p > e->location)
*(p - 1) = '\0';
@@ -416,78 +361,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
/* All other fields are mapped on e->other_detail */
p = pvt->other_detail;
- p += snprintf(p, sizeof(pvt->other_detail),
- "APEI location: %s ", e->location);
- if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) {
- u64 status = mem_err->error_status;
-
- p += sprintf(p, "status(0x%016llx): ", (long long)status);
- switch ((status >> 8) & 0xff) {
- case 1:
- p += sprintf(p, "Error detected internal to the component ");
- break;
- case 16:
- p += sprintf(p, "Error detected in the bus ");
- break;
- case 4:
- p += sprintf(p, "Storage error in DRAM memory ");
- break;
- case 5:
- p += sprintf(p, "Storage error in TLB ");
- break;
- case 6:
- p += sprintf(p, "Storage error in cache ");
- break;
- case 7:
- p += sprintf(p, "Error in one or more functional units ");
- break;
- case 8:
- p += sprintf(p, "component failed self test ");
- break;
- case 9:
- p += sprintf(p, "Overflow or undervalue of internal queue ");
- break;
- case 17:
- p += sprintf(p, "Virtual address not found on IO-TLB or IO-PDIR ");
- break;
- case 18:
- p += sprintf(p, "Improper access error ");
- break;
- case 19:
- p += sprintf(p, "Access to a memory address which is not mapped to any component ");
- break;
- case 20:
- p += sprintf(p, "Loss of Lockstep ");
- break;
- case 21:
- p += sprintf(p, "Response not associated with a request ");
- break;
- case 22:
- p += sprintf(p, "Bus parity error - must also set the A, C, or D Bits ");
- break;
- case 23:
- p += sprintf(p, "Detection of a PATH_ERROR ");
- break;
- case 25:
- p += sprintf(p, "Bus operation timeout ");
- break;
- case 26:
- p += sprintf(p, "A read was issued to data that has been poisoned ");
- break;
- default:
- p += sprintf(p, "reserved ");
- break;
- }
- }
- if (mem_err->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
- p += sprintf(p, "requestorID: 0x%016llx ",
- (long long)mem_err->requestor_id);
- if (mem_err->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
- p += sprintf(p, "responderID: 0x%016llx ",
- (long long)mem_err->responder_id);
- if (mem_err->validation_bits & CPER_MEM_VALID_TARGET_ID)
- p += sprintf(p, "targetID: 0x%016llx ",
- (long long)mem_err->responder_id);
+ p += print_mem_error_other_detail(mem_err, p, e->location, OTHER_DETAIL_LEN);
if (p > pvt->other_detail)
*(p - 1) = '\0';
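
Note: the new print_mem_error_other_detail() above leans on a property of scnprintf(): unlike snprintf(), it returns the number of characters actually written, so the accumulated offset can never step past the buffer even when output is truncated. A hedged illustration:

    #include <linux/kernel.h>

    static size_t fill_detail(char *buf, size_t len)
    {
    	size_t n = 0;

    	/* Each call appends at buf + n; scnprintf() returns only
    	 * what was actually stored, so len - n never underflows. */
    	n += scnprintf(buf + n, len - n, "node:%d ", 3);
    	n += scnprintf(buf + n, len - n, "rank:%d ", 1);

    	return n;	/* offset of the trailing '\0' */
    }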
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 324a46b8479b..f5d82518c15e 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -244,11 +244,6 @@ static inline u32 i5100_nrecmema_rank(u32 a)
return a >> 8 & ((1 << 3) - 1);
}
-static inline u32 i5100_nrecmema_dm_buf_id(u32 a)
-{
- return a & ((1 << 8) - 1);
-}
-
static inline u32 i5100_nrecmemb_cas(u32 a)
{
return a >> 16 & ((1 << 13) - 1);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 67f7bc3fe5b3..5bf92298554d 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -609,13 +609,6 @@ static int mpc85xx_l2_err_remove(struct platform_device *op)
}
static const struct of_device_id mpc85xx_l2_err_of_match[] = {
-/* deprecate the fsl,85.. forms in the future, 2.6.30? */
- { .compatible = "fsl,8540-l2-cache-controller", },
- { .compatible = "fsl,8541-l2-cache-controller", },
- { .compatible = "fsl,8544-l2-cache-controller", },
- { .compatible = "fsl,8548-l2-cache-controller", },
- { .compatible = "fsl,8555-l2-cache-controller", },
- { .compatible = "fsl,8568-l2-cache-controller", },
{ .compatible = "fsl,mpc8536-l2-cache-controller", },
{ .compatible = "fsl,mpc8540-l2-cache-controller", },
{ .compatible = "fsl,mpc8541-l2-cache-controller", },
@@ -644,13 +637,6 @@ static struct platform_driver mpc85xx_l2_err_driver = {
};
static const struct of_device_id mpc85xx_mc_err_of_match[] = {
-/* deprecate the fsl,85.. forms in the future, 2.6.30? */
- { .compatible = "fsl,8540-memory-controller", },
- { .compatible = "fsl,8541-memory-controller", },
- { .compatible = "fsl,8544-memory-controller", },
- { .compatible = "fsl,8548-memory-controller", },
- { .compatible = "fsl,8555-memory-controller", },
- { .compatible = "fsl,8568-memory-controller", },
{ .compatible = "fsl,mpc8536-memory-controller", },
{ .compatible = "fsl,mpc8540-memory-controller", },
{ .compatible = "fsl,mpc8541-memory-controller", },
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index f05ff02c0656..1cee64b80a7e 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Synopsys DDR ECC Driver
* This driver is based on ppc4xx_edac.c drivers
*
* Copyright (C) 2012 - 2014 Xilinx, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details
*/
#include <linux/edac.h>
@@ -164,6 +151,11 @@
#define ECC_STAT_CECNT_SHIFT 8
#define ECC_STAT_BITNUM_MASK 0x7F
+/* ECC error count register definitions */
+#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
+#define ECC_ERRCNT_UECNT_SHIFT 16
+#define ECC_ERRCNT_CECNT_MASK 0xFFFF
+
/* DDR QOS Interrupt register definitions */
#define DDR_QOS_IRQ_STAT_OFST 0x20200
#define DDR_QOSUE_MASK 0x4
@@ -423,15 +415,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
base = priv->baseaddr;
p = &priv->stat;
+ regval = readl(base + ECC_ERRCNT_OFST);
+ p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
+ p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
+ if (!p->ce_cnt)
+ goto ue_err;
+
regval = readl(base + ECC_STAT_OFST);
if (!regval)
return 1;
- p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
- p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
- if (!p->ce_cnt)
- goto ue_err;
-
p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
regval = readl(base + ECC_CEADDR0_OFST);
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 7197f9fa0245..54081403db4f 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -501,7 +501,7 @@ static int xgene_edac_mc_remove(struct xgene_edac_mc_ctx *mcu)
#define MEMERR_L2C_L2ESRA_PAGE_OFFSET 0x0804
/*
- * Processor Module Domain (PMD) context - Context for a pair of processsors.
+ * Processor Module Domain (PMD) context - Context for a pair of processors.
* Each PMD consists of 2 CPUs and a shared L2 cache. Each CPU consists of
* its own L1 cache.
*/
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 54be88167c60..f3b3953cac83 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -668,6 +668,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
void fw_core_remove_card(struct fw_card *card)
{
struct fw_card_driver dummy_driver = dummy_driver_template;
+ unsigned long flags;
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
@@ -682,7 +683,9 @@ void fw_core_remove_card(struct fw_card *card)
dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
+ spin_lock_irqsave(&card->lock, flags);
fw_destroy_nodes(card);
+ spin_unlock_irqrestore(&card->lock, flags);
/* Wait for all users, especially device workqueue jobs, to finish. */
fw_card_put(card);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 9f89c17730b1..708e417200f4 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1500,6 +1500,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
{
struct outbound_phy_packet_event *e =
container_of(packet, struct outbound_phy_packet_event, p);
+ struct client *e_client;
switch (status) {
/* expected: */
@@ -1516,9 +1517,10 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
}
e->phy_packet.data[0] = packet->timestamp;
+ e_client = e->client;
queue_event(e->client, &e->event, &e->phy_packet,
sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
- client_put(e->client);
+ client_put(e_client);
}
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index b63d55f5ebd3..f40c81534381 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -375,16 +375,13 @@ static void report_found_node(struct fw_card *card,
card->bm_retries = 0;
}
+/* Must be called with card->lock held */
void fw_destroy_nodes(struct fw_card *card)
{
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
card->color++;
if (card->local_node != NULL)
for_each_fw_node(card, card->local_node, report_lost_node);
card->local_node = NULL;
- spin_unlock_irqrestore(&card->lock, flags);
}
static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
@@ -510,6 +507,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
struct fw_node *local_node;
unsigned long flags;
+ spin_lock_irqsave(&card->lock, flags);
+
/*
* If the selfID buffer is not the immediate successor of the
* previously processed one, we cannot reliably compare the
@@ -521,8 +520,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
card->bm_retries = 0;
}
- spin_lock_irqsave(&card->lock, flags);
-
card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
card->node_id = node_id;
/*
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index ac487c96bb71..6c20815cc8d1 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -73,24 +73,25 @@ static int try_cancel_split_timeout(struct fw_transaction *t)
static int close_transaction(struct fw_transaction *transaction,
struct fw_card *card, int rcode)
{
- struct fw_transaction *t;
+ struct fw_transaction *t = NULL, *iter;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(t, &card->transaction_list, link) {
- if (t == transaction) {
- if (!try_cancel_split_timeout(t)) {
+ list_for_each_entry(iter, &card->transaction_list, link) {
+ if (iter == transaction) {
+ if (!try_cancel_split_timeout(iter)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
- list_del_init(&t->link);
- card->tlabel_mask &= ~(1ULL << t->tlabel);
+ list_del_init(&iter->link);
+ card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ t = iter;
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
- if (&t->link != &card->transaction_list) {
+ if (t) {
t->callback(card, rcode, NULL, 0, t->callback_data);
return 0;
}
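
Note: when list_for_each_entry() walks off the end, the iterator holds a pointer computed back from the list head, not a real element, so any use of it after the loop is a latent type-confusion bug. The conversions above keep a separate result pointer that is only set on a genuine match. The safe shape, sketched with an assumed item type:

    #include <linux/list.h>

    struct item {
    	struct list_head link;
    	int id;
    };

    static struct item *find_item(struct list_head *head, int id)
    {
    	struct item *found = NULL, *iter;

    	list_for_each_entry(iter, head, link) {
    		if (iter->id == id) {
    			found = iter;	/* publish only a real match */
    			break;
    		}
    	}
    	/* 'found' is NULL when the loop walked off the end. */
    	return found;
    }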
@@ -935,7 +936,7 @@ EXPORT_SYMBOL(fw_core_handle_request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
- struct fw_transaction *t;
+ struct fw_transaction *t = NULL, *iter;
unsigned long flags;
u32 *data;
size_t data_length;
@@ -947,20 +948,21 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
rcode = HEADER_GET_RCODE(p->header[1]);
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(t, &card->transaction_list, link) {
- if (t->node_id == source && t->tlabel == tlabel) {
- if (!try_cancel_split_timeout(t)) {
+ list_for_each_entry(iter, &card->transaction_list, link) {
+ if (iter->node_id == source && iter->tlabel == tlabel) {
+ if (!try_cancel_split_timeout(iter)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
- list_del_init(&t->link);
- card->tlabel_mask &= ~(1ULL << t->tlabel);
+ list_del_init(&iter->link);
+ card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ t = iter;
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
- if (&t->link == &card->transaction_list) {
+ if (!t) {
timed_out:
fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
source, tlabel);
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 85cd379fd383..60051c0cabea 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -408,7 +408,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
void *payload, size_t length, void *callback_data)
{
struct sbp2_logical_unit *lu = callback_data;
- struct sbp2_orb *orb;
+ struct sbp2_orb *orb = NULL, *iter;
struct sbp2_status status;
unsigned long flags;
@@ -433,17 +433,18 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
/* Lookup the orb corresponding to this status write. */
spin_lock_irqsave(&lu->tgt->lock, flags);
- list_for_each_entry(orb, &lu->orb_list, link) {
+ list_for_each_entry(iter, &lu->orb_list, link) {
if (STATUS_GET_ORB_HIGH(status) == 0 &&
- STATUS_GET_ORB_LOW(status) == orb->request_bus) {
- orb->rcode = RCODE_COMPLETE;
- list_del(&orb->link);
+ STATUS_GET_ORB_LOW(status) == iter->request_bus) {
+ iter->rcode = RCODE_COMPLETE;
+ list_del(&iter->link);
+ orb = iter;
break;
}
}
spin_unlock_irqrestore(&lu->tgt->lock, flags);
- if (&orb->link != &lu->orb_list) {
+ if (orb) {
orb->callback(orb, &status);
kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index cf6fed6dec77..45600acc0f45 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -49,7 +49,7 @@ struct scmi_msg_resp_clock_describe_rates {
struct {
__le32 value_low;
__le32 value_high;
- } rate[0];
+ } rate[];
#define RATE_TO_U64(X) \
({ \
typeof(X) x = (X); \
@@ -210,7 +210,8 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
if (rate_discrete && rate) {
clk->list.num_rates = tot_rate_cnt;
- sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL);
+ sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
+ rate_cmp_func, NULL);
}
clk->rate_discrete = rate_discrete;
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 46118300a4d1..e17c6568344d 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -679,7 +679,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
if (IS_ERR(xfer)) {
- scmi_clear_channel(info, cinfo);
+ if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
+ scmi_clear_channel(info, cinfo);
return;
}
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index 734f1eeee161..8302a2b4aeeb 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -405,8 +405,8 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
return 0;
}
-static struct scmi_shared_mem *get_channel_shm(struct scmi_optee_channel *chan,
- struct scmi_xfer *xfer)
+static struct scmi_shared_mem __iomem *
+get_channel_shm(struct scmi_optee_channel *chan, struct scmi_xfer *xfer)
{
if (!chan)
return NULL;
@@ -419,7 +419,7 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
- struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
+ struct scmi_shared_mem __iomem *shmem = get_channel_shm(channel, xfer);
int ret;
mutex_lock(&channel->mu);
@@ -436,7 +436,7 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
- struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
+ struct scmi_shared_mem __iomem *shmem = get_channel_shm(channel, xfer);
shmem_fetch_response(shmem, xfer);
}
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index e48108e694f8..7dad6f57d970 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -955,8 +955,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp,
ctl->alg_region = *alg_region;
if (subname && dsp->fw_ver >= 2) {
ctl->subname_len = subname_len;
- ctl->subname = kmemdup(subname,
- strlen(subname) + 1, GFP_KERNEL);
+ ctl->subname = kasprintf(GFP_KERNEL, "%.*s", subname_len, subname);
if (!ctl->subname) {
ret = -ENOMEM;
goto err_ctl;
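
Note: the kasprintf() form above copies at most subname_len bytes and NUL-terminates the result itself, so it stays safe even if subname is not terminated within the firmware data, whereas kmemdup(subname, strlen(subname) + 1, ...) had to walk memory until it found a NUL. Sketch of the bounded-duplication idiom:

    #include <linux/slab.h>

    /* Duplicate at most 'len' bytes of a possibly unterminated name;
     * the result is always NUL-terminated (or NULL on OOM). */
    static char *dup_bounded_name(const char *name, int len)
    {
    	return kasprintf(GFP_KERNEL, "%.*s", len, name);
    }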
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 2c3dac5ecb36..4720ba98cec3 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -91,6 +91,18 @@ config EFI_SOFT_RESERVE
If unsure, say Y.
+config EFI_DXE_MEM_ATTRIBUTES
+ bool "Adjust memory attributes in EFISTUB"
+ depends on EFI && EFI_STUB && X86
+ default y
+ help
+	  The UEFI specification does not guarantee that all memory is
+	  accessible for both write and execute, as the kernel expects
+	  it to be.
+	  Use DXE services to check and alter memory protection
+	  attributes during boot via EFISTUB, to ensure that the memory
+	  ranges used by the kernel are writable and executable.
+
config EFI_PARAMS_FROM_FDT
bool
help
@@ -284,3 +296,34 @@ config EFI_CUSTOM_SSDT_OVERLAYS
See Documentation/admin-guide/acpi/ssdt-overlays.rst for more
information.
+
+config EFI_DISABLE_RUNTIME
+ bool "Disable EFI runtime services support by default"
+ default y if PREEMPT_RT
+ help
+	  Allow disabling EFI runtime services support by default. This can
+	  already be achieved by using the efi=noruntime option, but it could
+	  be useful to have this default without any kernel command line
+	  parameter.
+
+	  The EFI runtime services are disabled by default when PREEMPT_RT is
+	  enabled, because measurements have shown that some EFI function
+	  calls might take too long to complete, causing large latencies,
+	  which is an issue for real-time kernels.
+
+ This default can be overridden by using the efi=runtime option.
+
+config EFI_COCO_SECRET
+ bool "EFI Confidential Computing Secret Area Support"
+ depends on EFI
+ help
+ Confidential Computing platforms (such as AMD SEV) allow the
+ Guest Owner to securely inject secrets during guest VM launch.
+ The secrets are placed in a designated EFI reserved memory area.
+
+ In order to use the secrets in the kernel, the location of the secret
+ area (as published in the EFI config table) must be kept.
+
+ If you say Y here, the address of the EFI secret area will be kept
+ for usage inside the kernel. This will allow the
+ virt/coco/efi_secret module to access the secrets, which in turn
+ allows userspace programs to access the injected secrets.
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 6ec8edec6329..e4e5ea7ce910 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -211,7 +211,33 @@ const char *cper_mem_err_type_str(unsigned int etype)
}
EXPORT_SYMBOL_GPL(cper_mem_err_type_str);
-static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)
+const char *cper_mem_err_status_str(u64 status)
+{
+ switch ((status >> 8) & 0xff) {
+ case 1: return "Error detected internal to the component";
+ case 4: return "Storage error in DRAM memory";
+ case 5: return "Storage error in TLB";
+ case 6: return "Storage error in cache";
+ case 7: return "Error in one or more functional units";
+ case 8: return "Component failed self test";
+ case 9: return "Overflow or undervalue of internal queue";
+ case 16: return "Error detected in the bus";
+ case 17: return "Virtual address not found on IO-TLB or IO-PDIR";
+ case 18: return "Improper access error";
+ case 19: return "Access to a memory address which is not mapped to any component";
+ case 20: return "Loss of Lockstep";
+ case 21: return "Response not associated with a request";
+ case 22: return "Bus parity error - must also set the A, C, or D Bits";
+ case 23: return "Detection of a protocol error";
+ case 24: return "Detection of a PATH_ERROR";
+ case 25: return "Bus operation timeout";
+ case 26: return "A read was issued to data that has been poisoned";
+ default: return "Reserved";
+ }
+}
+EXPORT_SYMBOL_GPL(cper_mem_err_status_str);
+
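
Note: exporting the decoder lets ghes_edac print the same strings as the CPER core. Bits 15:8 of the status word carry the error type, so a status of 0x0400 decodes to the DRAM storage error; a worked usage sketch:

    u64 status = 0x0400;	/* bits 15:8 == 4 */

    /* -> "status(0x0000000000000400): Storage error in DRAM memory" */
    pr_info("status(0x%016llx): %s\n", status,
    	cper_mem_err_status_str(status));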
+int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)
{
u32 len, n;
@@ -221,51 +247,51 @@ static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)
n = 0;
len = CPER_REC_LEN;
if (mem->validation_bits & CPER_MEM_VALID_NODE)
- n += scnprintf(msg + n, len - n, "node: %d ", mem->node);
+ n += scnprintf(msg + n, len - n, "node:%d ", mem->node);
if (mem->validation_bits & CPER_MEM_VALID_CARD)
- n += scnprintf(msg + n, len - n, "card: %d ", mem->card);
+ n += scnprintf(msg + n, len - n, "card:%d ", mem->card);
if (mem->validation_bits & CPER_MEM_VALID_MODULE)
- n += scnprintf(msg + n, len - n, "module: %d ", mem->module);
+ n += scnprintf(msg + n, len - n, "module:%d ", mem->module);
if (mem->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
- n += scnprintf(msg + n, len - n, "rank: %d ", mem->rank);
+ n += scnprintf(msg + n, len - n, "rank:%d ", mem->rank);
if (mem->validation_bits & CPER_MEM_VALID_BANK)
- n += scnprintf(msg + n, len - n, "bank: %d ", mem->bank);
+ n += scnprintf(msg + n, len - n, "bank:%d ", mem->bank);
if (mem->validation_bits & CPER_MEM_VALID_BANK_GROUP)
- n += scnprintf(msg + n, len - n, "bank_group: %d ",
+ n += scnprintf(msg + n, len - n, "bank_group:%d ",
mem->bank >> CPER_MEM_BANK_GROUP_SHIFT);
if (mem->validation_bits & CPER_MEM_VALID_BANK_ADDRESS)
- n += scnprintf(msg + n, len - n, "bank_address: %d ",
+ n += scnprintf(msg + n, len - n, "bank_address:%d ",
mem->bank & CPER_MEM_BANK_ADDRESS_MASK);
if (mem->validation_bits & CPER_MEM_VALID_DEVICE)
- n += scnprintf(msg + n, len - n, "device: %d ", mem->device);
+ n += scnprintf(msg + n, len - n, "device:%d ", mem->device);
if (mem->validation_bits & (CPER_MEM_VALID_ROW | CPER_MEM_VALID_ROW_EXT)) {
u32 row = mem->row;
row |= cper_get_mem_extension(mem->validation_bits, mem->extended);
- n += scnprintf(msg + n, len - n, "row: %d ", row);
+ n += scnprintf(msg + n, len - n, "row:%d ", row);
}
if (mem->validation_bits & CPER_MEM_VALID_COLUMN)
- n += scnprintf(msg + n, len - n, "column: %d ", mem->column);
+ n += scnprintf(msg + n, len - n, "column:%d ", mem->column);
if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION)
- n += scnprintf(msg + n, len - n, "bit_position: %d ",
+ n += scnprintf(msg + n, len - n, "bit_position:%d ",
mem->bit_pos);
if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
- n += scnprintf(msg + n, len - n, "requestor_id: 0x%016llx ",
+ n += scnprintf(msg + n, len - n, "requestor_id:0x%016llx ",
mem->requestor_id);
if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
- n += scnprintf(msg + n, len - n, "responder_id: 0x%016llx ",
+ n += scnprintf(msg + n, len - n, "responder_id:0x%016llx ",
mem->responder_id);
if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
- n += scnprintf(msg + n, len - n, "target_id: 0x%016llx ",
+ n += scnprintf(msg + n, len - n, "target_id:0x%016llx ",
mem->target_id);
if (mem->validation_bits & CPER_MEM_VALID_CHIP_ID)
- n += scnprintf(msg + n, len - n, "chip_id: %d ",
+ n += scnprintf(msg + n, len - n, "chip_id:%d ",
mem->extended >> CPER_MEM_CHIP_ID_SHIFT);
return n;
}
-static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
{
u32 len, n;
const char *bank = NULL, *device = NULL;
@@ -334,7 +360,9 @@ static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
return;
}
if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
- printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
+ printk("%s error_status: %s (0x%016llx)\n",
+ pfx, cper_mem_err_status_str(mem->error_status),
+ mem->error_status);
if (mem->validation_bits & CPER_MEM_VALID_PA)
printk("%s""physical_address: 0x%016llx\n",
pfx, mem->physical_addr);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 5502e176d51b..860534bcfdac 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -46,6 +46,9 @@ struct efi __read_mostly efi = {
#ifdef CONFIG_LOAD_UEFI_KEYS
.mokvar_table = EFI_INVALID_TABLE_ADDR,
#endif
+#ifdef CONFIG_EFI_COCO_SECRET
+ .coco_secret = EFI_INVALID_TABLE_ADDR,
+#endif
};
EXPORT_SYMBOL(efi);
@@ -66,7 +69,7 @@ struct mm_struct efi_mm = {
struct workqueue_struct *efi_rts_wq;
-static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT);
+static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
static int __init setup_noefi(char *arg)
{
disable_runtime = true;
@@ -422,6 +425,11 @@ static int __init efisubsys_init(void)
if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
efi_debugfs_init();
+#ifdef CONFIG_EFI_COCO_SECRET
+ if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
+ platform_device_register_simple("efi_secret", 0, NULL, 0);
+#endif
+
return 0;
err_remove_group:
@@ -529,6 +537,9 @@ static const efi_config_table_type_t common_tables[] __initconst = {
#ifdef CONFIG_LOAD_UEFI_KEYS
{LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
#endif
+#ifdef CONFIG_EFI_COCO_SECRET
+ {LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret" },
+#endif
{},
};
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 4b5b2403b3a0..0131e3aaa605 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -117,7 +117,8 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
- efi_loaded_image_t *image)
+ efi_loaded_image_t *image,
+ efi_handle_t image_handle)
{
const int slack = TEXT_OFFSET - 5 * PAGE_SIZE;
int alloc_size = MAX_UNCOMP_KERNEL_SIZE + EFI_PHYS_ALIGN;
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 9cc556013d08..577173ee1f83 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -83,7 +83,8 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
- efi_loaded_image_t *image)
+ efi_loaded_image_t *image,
+ efi_handle_t image_handle)
{
efi_status_t status;
unsigned long kernel_size, kernel_memsize = 0;
@@ -100,7 +101,15 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
u64 min_kimg_align = efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- if (!efi_nokaslr) {
+ efi_guid_t li_fixed_proto = LINUX_EFI_LOADED_IMAGE_FIXED_GUID;
+ void *p;
+
+ if (efi_nokaslr) {
+ efi_info("KASLR disabled on kernel command line\n");
+ } else if (efi_bs_call(handle_protocol, image_handle,
+ &li_fixed_proto, &p) == EFI_SUCCESS) {
+ efi_info("Image placement fixed by loader\n");
+ } else {
status = efi_get_random_bytes(sizeof(phys_seed),
(u8 *)&phys_seed);
if (status == EFI_NOT_FOUND) {
@@ -111,8 +120,6 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
status);
efi_nokaslr = true;
}
- } else {
- efi_info("KASLR disabled on kernel command line\n");
}
}
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index da93864d7abc..f515394cce6e 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -198,7 +198,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
status = handle_kernel_image(&image_addr, &image_size,
&reserve_addr,
&reserve_size,
- image);
+ image, handle);
if (status != EFI_SUCCESS) {
efi_err("Failed to relocate kernel\n");
goto fail_free_screeninfo;
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index edb77b0621ea..b0ae0a454404 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -36,6 +36,9 @@ extern bool efi_novamap;
extern const efi_system_table_t *efi_system_table;
+typedef union efi_dxe_services_table efi_dxe_services_table_t;
+extern const efi_dxe_services_table_t *efi_dxe_table;
+
efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
efi_system_table_t *sys_table_arg);
@@ -44,6 +47,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
#define efi_is_native() (true)
#define efi_bs_call(func, ...) efi_system_table->boottime->func(__VA_ARGS__)
#define efi_rt_call(func, ...) efi_system_table->runtime->func(__VA_ARGS__)
+#define efi_dxe_call(func, ...) efi_dxe_table->func(__VA_ARGS__)
#define efi_table_attr(inst, attr) (inst->attr)
#define efi_call_proto(inst, func, ...) inst->func(inst, ##__VA_ARGS__)
@@ -329,6 +333,76 @@ union efi_boot_services {
} mixed_mode;
};
+typedef enum {
+ EfiGcdMemoryTypeNonExistent,
+ EfiGcdMemoryTypeReserved,
+ EfiGcdMemoryTypeSystemMemory,
+ EfiGcdMemoryTypeMemoryMappedIo,
+ EfiGcdMemoryTypePersistent,
+ EfiGcdMemoryTypeMoreReliable,
+ EfiGcdMemoryTypeMaximum
+} efi_gcd_memory_type_t;
+
+typedef struct {
+ efi_physical_addr_t base_address;
+ u64 length;
+ u64 capabilities;
+ u64 attributes;
+ efi_gcd_memory_type_t gcd_memory_type;
+ void *image_handle;
+ void *device_handle;
+} efi_gcd_memory_space_desc_t;
+
+/*
+ * EFI DXE Services table
+ */
+union efi_dxe_services_table {
+ struct {
+ efi_table_hdr_t hdr;
+ void *add_memory_space;
+ void *allocate_memory_space;
+ void *free_memory_space;
+ void *remove_memory_space;
+ efi_status_t (__efiapi *get_memory_space_descriptor)(efi_physical_addr_t,
+ efi_gcd_memory_space_desc_t *);
+ efi_status_t (__efiapi *set_memory_space_attributes)(efi_physical_addr_t,
+ u64, u64);
+ void *get_memory_space_map;
+ void *add_io_space;
+ void *allocate_io_space;
+ void *free_io_space;
+ void *remove_io_space;
+ void *get_io_space_descriptor;
+ void *get_io_space_map;
+ void *dispatch;
+ void *schedule;
+ void *trust;
+ void *process_firmware_volume;
+ void *set_memory_space_capabilities;
+ };
+ struct {
+ efi_table_hdr_t hdr;
+ u32 add_memory_space;
+ u32 allocate_memory_space;
+ u32 free_memory_space;
+ u32 remove_memory_space;
+ u32 get_memory_space_descriptor;
+ u32 set_memory_space_attributes;
+ u32 get_memory_space_map;
+ u32 add_io_space;
+ u32 allocate_io_space;
+ u32 free_io_space;
+ u32 remove_io_space;
+ u32 get_io_space_descriptor;
+ u32 get_io_space_map;
+ u32 dispatch;
+ u32 schedule;
+ u32 trust;
+ u32 process_firmware_volume;
+ u32 set_memory_space_capabilities;
+ } mixed_mode;
+};
+
typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t;
union efi_uga_draw_protocol {
@@ -720,6 +794,13 @@ union efi_tcg2_protocol {
} mixed_mode;
};
+struct riscv_efi_boot_protocol {
+ u64 revision;
+
+ efi_status_t (__efiapi *get_boot_hartid)(struct riscv_efi_boot_protocol *,
+ unsigned long *boot_hartid);
+};
+
typedef union efi_load_file_protocol efi_load_file_protocol_t;
typedef union efi_load_file_protocol efi_load_file2_protocol_t;
@@ -865,7 +946,8 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
- efi_loaded_image_t *image);
+ efi_loaded_image_t *image,
+ efi_handle_t image_handle);
asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
unsigned long fdt_addr,
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 724155b9e10d..715f37479154 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -56,6 +56,7 @@ efi_status_t efi_random_alloc(unsigned long size,
unsigned long random_seed)
{
unsigned long map_size, desc_size, total_slots = 0, target_slot;
+ unsigned long total_mirrored_slots = 0;
unsigned long buff_size;
efi_status_t status;
efi_memory_desc_t *memory_map;
@@ -86,8 +87,14 @@ efi_status_t efi_random_alloc(unsigned long size,
slots = get_entry_num_slots(md, size, ilog2(align));
MD_NUM_SLOTS(md) = slots;
total_slots += slots;
+ if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
+ total_mirrored_slots += slots;
}
+ /* consider only mirrored slots for randomization if any exist */
+ if (total_mirrored_slots > 0)
+ total_slots = total_mirrored_slots;
+
/* find a random number between 0 and total_slots */
target_slot = (total_slots * (u64)(random_seed & U32_MAX)) >> 32;
@@ -107,6 +114,10 @@ efi_status_t efi_random_alloc(unsigned long size,
efi_physical_addr_t target;
unsigned long pages;
+ if (total_mirrored_slots > 0 &&
+ !(md->attribute & EFI_MEMORY_MORE_RELIABLE))
+ continue;
+
if (target_slot >= MD_NUM_SLOTS(md)) {
target_slot -= MD_NUM_SLOTS(md);
continue;
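
Note: the target_slot computation above is a 32.32 fixed-point multiply: scaling the seed by total_slots and shifting right by 32 maps the seed onto [0, total_slots) without a division, and any rounding bias is spread across the range rather than concentrated at low slots the way a plain modulo would. A worked sketch:

    /* Sketch: map a 32-bit seed onto [0, total_slots). */
    static unsigned long pick_slot(unsigned long total_slots, u32 seed)
    {
    	/* (total_slots * seed) / 2^32, computed in 64 bits */
    	return (total_slots * (u64)seed) >> 32;
    }

    /* e.g. total_slots = 1000, seed = 0x80000000 -> slot 500 */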
diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c
index 9c460843442f..9e85e58d1f27 100644
--- a/drivers/firmware/efi/libstub/riscv-stub.c
+++ b/drivers/firmware/efi/libstub/riscv-stub.c
@@ -21,9 +21,9 @@
#define MIN_KIMG_ALIGN SZ_4M
#endif
-typedef void __noreturn (*jump_kernel_func)(unsigned int, unsigned long);
+typedef void __noreturn (*jump_kernel_func)(unsigned long, unsigned long);
-static u32 hartid;
+static unsigned long hartid;
static int get_boot_hartid_from_fdt(void)
{
@@ -47,14 +47,31 @@ static int get_boot_hartid_from_fdt(void)
return 0;
}
+static efi_status_t get_boot_hartid_from_efi(void)
+{
+ efi_guid_t boot_protocol_guid = RISCV_EFI_BOOT_PROTOCOL_GUID;
+ struct riscv_efi_boot_protocol *boot_protocol;
+ efi_status_t status;
+
+ status = efi_bs_call(locate_protocol, &boot_protocol_guid, NULL,
+ (void **)&boot_protocol);
+ if (status != EFI_SUCCESS)
+ return status;
+ return efi_call_proto(boot_protocol, get_boot_hartid, &hartid);
+}
+
efi_status_t check_platform_features(void)
{
+ efi_status_t status;
int ret;
- ret = get_boot_hartid_from_fdt();
- if (ret) {
- efi_err("/chosen/boot-hartid missing or invalid!\n");
- return EFI_UNSUPPORTED;
+ status = get_boot_hartid_from_efi();
+ if (status != EFI_SUCCESS) {
+ ret = get_boot_hartid_from_fdt();
+ if (ret) {
+ efi_err("Failed to get boot hartid!\n");
+ return EFI_UNSUPPORTED;
+ }
}
return EFI_SUCCESS;
}
@@ -80,7 +97,8 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
- efi_loaded_image_t *image)
+ efi_loaded_image_t *image,
+ efi_handle_t image_handle)
{
unsigned long kernel_size = 0;
unsigned long preferred_addr;
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 01ddd4502e28..b14e88ccefca 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -22,6 +22,7 @@
#define MAXMEM_X86_64_4LEVEL (1ull << 46)
const efi_system_table_t *efi_system_table;
+const efi_dxe_services_table_t *efi_dxe_table;
extern u32 image_offset;
static efi_loaded_image_t *image = NULL;
@@ -211,9 +212,110 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
}
}
+static void
+adjust_memory_range_protection(unsigned long start, unsigned long size)
+{
+ efi_status_t status;
+ efi_gcd_memory_space_desc_t desc;
+ unsigned long end, next;
+ unsigned long rounded_start, rounded_end;
+ unsigned long unprotect_start, unprotect_size;
+ int has_system_memory = 0;
+
+ if (efi_dxe_table == NULL)
+ return;
+
+ rounded_start = rounddown(start, EFI_PAGE_SIZE);
+ rounded_end = roundup(start + size, EFI_PAGE_SIZE);
+
+ /*
+	 * Don't modify the attributes of memory regions that are
+	 * already suitable, to lower the chance of running into
+	 * firmware bugs.
+ */
+
+ for (end = start + size; start < end; start = next) {
+
+ status = efi_dxe_call(get_memory_space_descriptor, start, &desc);
+
+ if (status != EFI_SUCCESS)
+ return;
+
+ next = desc.base_address + desc.length;
+
+ /*
+ * Only system memory is suitable for trampoline/kernel image placement,
+ * so only this type of memory needs its attributes to be modified.
+ */
+
+ if (desc.gcd_memory_type != EfiGcdMemoryTypeSystemMemory ||
+ (desc.attributes & (EFI_MEMORY_RO | EFI_MEMORY_XP)) == 0)
+ continue;
+
+ unprotect_start = max(rounded_start, (unsigned long)desc.base_address);
+ unprotect_size = min(rounded_end, next) - unprotect_start;
+
+ status = efi_dxe_call(set_memory_space_attributes,
+ unprotect_start, unprotect_size,
+ EFI_MEMORY_WB);
+
+ if (status != EFI_SUCCESS) {
+ efi_warn("Unable to unprotect memory range [%08lx,%08lx]: %d\n",
+ unprotect_start,
+ unprotect_start + unprotect_size,
+ (int)status);
+ }
+ }
+}
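
Note: adjust_memory_range_protection() above first widens the request to whole EFI pages, so the DXE attribute update never splits a page. A worked example of the rounddown()/roundup() arithmetic with illustrative numbers:

    #include <linux/efi.h>	/* EFI_PAGE_SIZE == 4096 */
    #include <linux/math.h>

    /* start = 0x12345, size = 0x100:
     *   lo = rounddown(0x12345, 0x1000) = 0x12000
     *   hi = roundup(0x12445, 0x1000)   = 0x13000
     */
    static void page_bounds(unsigned long start, unsigned long size,
    			unsigned long *lo, unsigned long *hi)
    {
    	*lo = rounddown(start, EFI_PAGE_SIZE);
    	*hi = roundup(start + size, EFI_PAGE_SIZE);
    }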
+
+/*
+ * The trampoline takes 2 pages and can be loaded in the first megabyte
+ * of memory, with its end placed between 128k and 640k, where the BIOS
+ * might start. (see arch/x86/boot/compressed/pgtable_64.c)
+ *
+ * We cannot determine the exact trampoline placement, since the memory
+ * map can be modified by UEFI, which can alter the computed address.
+ */
+
+#define TRAMPOLINE_PLACEMENT_BASE ((128 - 8)*1024)
+#define TRAMPOLINE_PLACEMENT_SIZE (640*1024 - (128 - 8)*1024)
+
+void startup_32(struct boot_params *boot_params);
+
+static void
+setup_memory_protection(unsigned long image_base, unsigned long image_size)
+{
+ /*
+ * Allow execution of possible trampoline used
+ * for switching between 4- and 5-level page tables
+ * and relocated kernel image.
+ */
+
+ adjust_memory_range_protection(TRAMPOLINE_PLACEMENT_BASE,
+ TRAMPOLINE_PLACEMENT_SIZE);
+
+#ifdef CONFIG_64BIT
+ if (image_base != (unsigned long)startup_32)
+ adjust_memory_range_protection(image_base, image_size);
+#else
+ /*
+ * Clear protection flags on a whole range of possible
+ * addresses used for KASLR. We don't need to do that
+ * on x86_64, since KASLR/extraction is performed after
+ * dedicated identity page tables are built and we only
+ * need to remove possible protection on relocated image
+ * itself disregarding further relocations.
+ */
+ adjust_memory_range_protection(LOAD_PHYSICAL_ADDR,
+ KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR);
+#endif
+}
+
static const efi_char16_t apple[] = L"Apple";
-static void setup_quirks(struct boot_params *boot_params)
+static void setup_quirks(struct boot_params *boot_params,
+ unsigned long image_base,
+ unsigned long image_size)
{
efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
efi_table_attr(efi_system_table, fw_vendor);
@@ -222,6 +324,9 @@ static void setup_quirks(struct boot_params *boot_params)
if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
retrieve_apple_device_properties(boot_params);
}
+
+ if (IS_ENABLED(CONFIG_EFI_DXE_MEM_ATTRIBUTES))
+ setup_memory_protection(image_base, image_size);
}
/*
@@ -341,8 +446,6 @@ static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status)
asm("hlt");
}
-void startup_32(struct boot_params *boot_params);
-
void __noreturn efi_stub_entry(efi_handle_t handle,
efi_system_table_t *sys_table_arg,
struct boot_params *boot_params);
@@ -677,11 +780,17 @@ unsigned long efi_main(efi_handle_t handle,
efi_status_t status;
efi_system_table = sys_table_arg;
-
/* Check if we were booted by the EFI firmware */
if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
efi_exit(handle, EFI_INVALID_PARAMETER);
+ efi_dxe_table = get_efi_config_table(EFI_DXE_SERVICES_TABLE_GUID);
+ if (efi_dxe_table &&
+ efi_dxe_table->hdr.signature != EFI_DXE_SERVICES_TABLE_SIGNATURE) {
+ efi_warn("Ignoring DXE services table: invalid signature\n");
+ efi_dxe_table = NULL;
+ }
+
/*
* If the kernel isn't already loaded at a suitable address,
* relocate it.
@@ -791,7 +900,7 @@ unsigned long efi_main(efi_handle_t handle,
setup_efi_pci(boot_params);
- setup_quirks(boot_params);
+ setup_quirks(boot_params, bzimage_addr, buffer_end - buffer_start);
status = exit_boot(boot_params, handle);
if (status != EFI_SUCCESS) {
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index b8b1473a5b1e..f87ff3fa8a53 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -178,3 +178,22 @@ discussed but the idea is to provide a low-level access point
for debugging and hacking and to expose all lines without the
need of any exporting. Also provide ample ammunition to shoot
oneself in the foot, because this is debugfs after all.
+
+
+Moving over to immutable irq_chip structures
+
+Most of the gpio chips implementing interrupt support rely on gpiolib
+intercepting some of the irq_chip callbacks, preventing the structures
+from being made read-only and forcing duplication of structures that
+should otherwise be unique.
+
+The solution is to call into the gpiolib code when needed (resource
+management, enable/disable or unmask/mask callbacks), and to let the
+core code know about that by exposing a flag (IRQCHIP_IMMUTABLE) in
+the irq_chip structure. The irq_chip structure can then be made unique
+and const.
+
+A small number of drivers have been converted (pl061, tegra186, msm,
+amd, apple), and can be used as examples of how to proceed with this
+conversion. Note that drivers using the generic irqchip framework
+cannot be converted yet, but watch this space!
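
The pl061 and tegra186 hunks further down in this series show the conversion in full. As a condensed sketch (the "foo" driver and its foo_hw_* helpers are hypothetical), an immutable irq_chip pairs a const definition carrying IRQCHIP_IMMUTABLE and the resource helpers with explicit gpiolib calls in the mask/unmask paths:

    static void foo_irq_mask(struct irq_data *d)
    {
    	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

    	foo_hw_mask_line(gc, irqd_to_hwirq(d));	/* hypothetical hw helper */
    	gpiochip_disable_irq(gc, irqd_to_hwirq(d));
    }

    static void foo_irq_unmask(struct irq_data *d)
    {
    	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

    	gpiochip_enable_irq(gc, irqd_to_hwirq(d));
    	foo_hw_unmask_line(gc, irqd_to_hwirq(d));	/* hypothetical hw helper */
    }

    static const struct irq_chip foo_irq_chip = {
    	.name		= "foo",
    	.irq_mask	= foo_irq_mask,
    	.irq_unmask	= foo_irq_unmask,
    	.flags		= IRQCHIP_IMMUTABLE,
    	GPIOCHIP_IRQ_RESOURCE_HELPERS,
    };

    /* at probe time, instead of assigning girq->chip directly: */
    gpio_irq_chip_set_chip(&gc->irq, &foo_irq_chip);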
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 4c1f9e1091b7..2db19cd640a4 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -707,6 +707,9 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long flags;
unsigned int on, off;
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle;
do_div(val, NSEC_PER_SEC);
if (val > UINT_MAX + 1ULL)
@@ -871,13 +874,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
mvpwm->chip.dev = dev;
mvpwm->chip.ops = &mvebu_pwm_ops;
mvpwm->chip.npwm = mvchip->chip.ngpio;
- /*
- * There may already be some PWM allocated, so we can't force
- * mvpwm->chip.base to a fixed point like mvchip->chip.base.
- * So, we let pwmchip_add() do the numbering and take the next free
- * region.
- */
- mvpwm->chip.base = -1;
spin_lock_init(&mvpwm->lock);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d2fe76f3f34f..8726921a1129 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -762,11 +762,11 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
bitmap_xor(cur_stat, new_stat, old_stat, gc->ngpio);
bitmap_and(trigger, cur_stat, chip->irq_mask, gc->ngpio);
+ bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
+
if (bitmap_empty(trigger, gc->ngpio))
return false;
- bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
-
bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio);
bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 4ecab700f23f..6464056cb6ae 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -52,7 +52,6 @@ struct pl061 {
void __iomem *base;
struct gpio_chip gc;
- struct irq_chip irq_chip;
int parent_irq;
#ifdef CONFIG_PM
@@ -241,6 +240,8 @@ static void pl061_irq_mask(struct irq_data *d)
gpioie = readb(pl061->base + GPIOIE) & ~mask;
writeb(gpioie, pl061->base + GPIOIE);
raw_spin_unlock(&pl061->lock);
+
+ gpiochip_disable_irq(gc, d->hwirq);
}
static void pl061_irq_unmask(struct irq_data *d)
@@ -250,6 +251,8 @@ static void pl061_irq_unmask(struct irq_data *d)
u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
u8 gpioie;
+ gpiochip_enable_irq(gc, d->hwirq);
+
raw_spin_lock(&pl061->lock);
gpioie = readb(pl061->base + GPIOIE) | mask;
writeb(gpioie, pl061->base + GPIOIE);
@@ -283,6 +286,24 @@ static int pl061_irq_set_wake(struct irq_data *d, unsigned int state)
return irq_set_irq_wake(pl061->parent_irq, state);
}
+static void pl061_irq_print_chip(struct irq_data *data, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+
+ seq_printf(p, dev_name(gc->parent));
+}
+
+static const struct irq_chip pl061_irq_chip = {
+ .irq_ack = pl061_irq_ack,
+ .irq_mask = pl061_irq_mask,
+ .irq_unmask = pl061_irq_unmask,
+ .irq_set_type = pl061_irq_type,
+ .irq_set_wake = pl061_irq_set_wake,
+ .irq_print_chip = pl061_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
@@ -315,13 +336,6 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
/*
* irq_chip support
*/
- pl061->irq_chip.name = dev_name(dev);
- pl061->irq_chip.irq_ack = pl061_irq_ack;
- pl061->irq_chip.irq_mask = pl061_irq_mask;
- pl061->irq_chip.irq_unmask = pl061_irq_unmask;
- pl061->irq_chip.irq_set_type = pl061_irq_type;
- pl061->irq_chip.irq_set_wake = pl061_irq_set_wake;
-
writeb(0, pl061->base + GPIOIE); /* disable irqs */
irq = adev->irq[0];
if (!irq)
@@ -329,7 +343,7 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
pl061->parent_irq = irq;
girq = &pl061->gc.irq;
- girq->chip = &pl061->irq_chip;
+ gpio_irq_chip_set_chip(girq, &pl061_irq_chip);
girq->parent_handler = pl061_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 8e5d87984a48..41c31b10ae84 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -134,7 +134,7 @@ static int gpio_sim_get_multiple(struct gpio_chip *gc,
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
mutex_lock(&chip->lock);
- bitmap_copy(bits, chip->value_map, gc->ngpio);
+ bitmap_replace(bits, bits, chip->value_map, mask, gc->ngpio);
mutex_unlock(&chip->lock);
return 0;
@@ -146,7 +146,7 @@ static void gpio_sim_set_multiple(struct gpio_chip *gc,
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
mutex_lock(&chip->lock);
- bitmap_copy(chip->value_map, bits, gc->ngpio);
+ bitmap_replace(chip->value_map, chip->value_map, bits, mask, gc->ngpio);
mutex_unlock(&chip->lock);
}
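
For reference, bitmap_replace(dst, old, new, mask, nbits) computes dst = (old & ~mask) | (new & mask) bit by bit, which is why the two hunks above now honor the caller's mask instead of clobbering every line. A minimal sketch:

    #include <linux/bitmap.h>

    DECLARE_BITMAP(dst, 8);
    DECLARE_BITMAP(old, 8)  = { 0x0f };	/* 0b00001111 */
    DECLARE_BITMAP(new, 8)  = { 0xf0 };	/* 0b11110000 */
    DECLARE_BITMAP(mask, 8) = { 0xa5 };	/* 0b10100101 */

    bitmap_replace(dst, old, new, mask, 8);
    /* dst ends up 0xaa: bits 0, 2, 5 and 7 come from new, the rest from old */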
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 031fe105b58e..84c4f1e9fb0c 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -80,7 +80,6 @@ struct tegra_gpio_soc {
struct tegra_gpio {
struct gpio_chip gpio;
- struct irq_chip intc;
unsigned int num_irq;
unsigned int *irq;
@@ -372,6 +371,8 @@ static void tegra186_irq_mask(struct irq_data *data)
value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
value &= ~TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT;
writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+ gpiochip_disable_irq(&gpio->gpio, data->hwirq);
}
static void tegra186_irq_unmask(struct irq_data *data)
@@ -385,6 +386,8 @@ static void tegra186_irq_unmask(struct irq_data *data)
if (WARN_ON(base == NULL))
return;
+ gpiochip_enable_irq(&gpio->gpio, data->hwirq);
+
value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
value |= TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT;
writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
@@ -456,6 +459,24 @@ static int tegra186_irq_set_wake(struct irq_data *data, unsigned int on)
return 0;
}
+static void tegra186_irq_print_chip(struct irq_data *data, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+
+ seq_printf(p, dev_name(gc->parent));
+}
+
+static const struct irq_chip tegra186_gpio_irq_chip = {
+ .irq_ack = tegra186_irq_ack,
+ .irq_mask = tegra186_irq_mask,
+ .irq_unmask = tegra186_irq_unmask,
+ .irq_set_type = tegra186_irq_set_type,
+ .irq_set_wake = tegra186_irq_set_wake,
+ .irq_print_chip = tegra186_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static void tegra186_gpio_irq(struct irq_desc *desc)
{
struct tegra_gpio *gpio = irq_desc_get_handler_data(desc);
@@ -760,15 +781,8 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.of_xlate = tegra186_gpio_of_xlate;
#endif /* CONFIG_OF_GPIO */
- gpio->intc.name = dev_name(&pdev->dev);
- gpio->intc.irq_ack = tegra186_irq_ack;
- gpio->intc.irq_mask = tegra186_irq_mask;
- gpio->intc.irq_unmask = tegra186_irq_unmask;
- gpio->intc.irq_set_type = tegra186_irq_set_type;
- gpio->intc.irq_set_wake = tegra186_irq_set_wake;
-
irq = &gpio->gpio.irq;
- irq->chip = &gpio->intc;
+ gpio_irq_chip_set_chip(irq, &tegra186_gpio_irq_chip);
irq->fwnode = of_node_to_fwnode(pdev->dev.of_node);
irq->child_to_parent_hwirq = tegra186_gpio_child_to_parent_hwirq;
irq->populate_parent_alloc_arg = tegra186_gpio_populate_parent_fwspec;
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 20780c35da1b..23cddb265a0d 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -125,9 +125,13 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
{
struct vf610_gpio_port *port = gpiochip_get_data(chip);
unsigned long mask = BIT(gpio);
+ u32 val;
- if (port->sdata && port->sdata->have_paddr)
- vf610_gpio_writel(mask, port->gpio_base + GPIO_PDDR);
+ if (port->sdata && port->sdata->have_paddr) {
+ val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+ val |= mask;
+ vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
+ }
vf610_gpio_set(chip, gpio, value);
diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
index 47455810bdb9..e6534ea1eaa7 100644
--- a/drivers/gpio/gpio-visconti.c
+++ b/drivers/gpio/gpio-visconti.c
@@ -130,7 +130,6 @@ static int visconti_gpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *girq;
struct irq_domain *parent;
struct device_node *irq_parent;
- struct fwnode_handle *fwnode;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -150,14 +149,12 @@ static int visconti_gpio_probe(struct platform_device *pdev)
}
parent = irq_find_host(irq_parent);
+ of_node_put(irq_parent);
if (!parent) {
dev_err(dev, "No IRQ parent domain\n");
return -ENODEV;
}
- fwnode = of_node_to_fwnode(irq_parent);
- of_node_put(irq_parent);
-
ret = bgpio_init(&priv->gpio_chip, dev, 4,
priv->base + GPIO_IDATA,
priv->base + GPIO_OSET,
@@ -180,7 +177,7 @@ static int visconti_gpio_probe(struct platform_device *pdev)
girq = &priv->gpio_chip.irq;
girq->chip = irq_chip;
- girq->fwnode = fwnode;
+ girq->fwnode = of_node_to_fwnode(dev->of_node);
girq->parent_domain = parent;
girq->child_to_parent_hwirq = visconti_gpio_child_to_parent_hwirq;
girq->populate_parent_alloc_arg = visconti_gpio_populate_parent_fwspec;
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index a5495ad31c9c..c2523ac26fac 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -108,7 +108,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
* controller does not have GPIO chip registered at the moment. This is to
* support probe deferral.
*/
-static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
+static struct gpio_desc *acpi_get_gpiod(char *path, unsigned int pin)
{
struct gpio_chip *chip;
acpi_handle handle;
@@ -136,7 +136,7 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
* as it is intended for use outside of the GPIO layer (in a similar fashion to
* gpiod_get_index() for example) it also holds a reference to the GPIO device.
*/
-struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label)
+struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label)
{
struct gpio_desc *gpio;
int ret;
@@ -317,11 +317,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
return desc;
}
-static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
+static bool acpi_gpio_in_ignore_list(const char *controller_in, unsigned int pin_in)
{
const char *controller, *pin_str;
- int len, pin;
+ unsigned int pin;
char *endp;
+ int len;
controller = ignore_wake;
while (controller) {
@@ -354,13 +355,13 @@ err:
static bool acpi_gpio_irq_is_wake(struct device *parent,
struct acpi_resource_gpio *agpio)
{
- int pin = agpio->pin_table[0];
+ unsigned int pin = agpio->pin_table[0];
if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
return false;
if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) {
- dev_info(parent, "Ignoring wakeup on pin %d\n", pin);
+ dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
return false;
}
@@ -378,7 +379,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
struct acpi_gpio_event *event;
irq_handler_t handler = NULL;
struct gpio_desc *desc;
- int ret, pin, irq;
+ unsigned int pin;
+ int ret, irq;
if (!acpi_gpio_get_irq_resource(ares, &agpio))
return AE_OK;
@@ -387,8 +389,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
pin = agpio->pin_table[0];
if (pin <= 255) {
- char ev_name[5];
- sprintf(ev_name, "_%c%02hhX",
+ char ev_name[8];
+ sprintf(ev_name, "_%c%02X",
agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
pin);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
@@ -1098,7 +1100,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
length = min_t(u16, agpio->pin_table_length, pin_index + bits);
for (i = pin_index; i < length; ++i) {
- int pin = agpio->pin_table[i];
+ unsigned int pin = agpio->pin_table[i];
struct acpi_gpio_connection *conn;
struct gpio_desc *desc;
bool found;
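
A note on the ev_name hunk above: ACPI names GPIO-signaled event methods _EXX (edge) or _LXX (level), with XX the pin number in hex, so for pin <= 255 the formatted string is four characters plus the terminating NUL. The enlarged 8-byte buffer leaves headroom and, together with dropping the %02hhX length modifier, avoids format-truncation warnings. A standalone sketch (edge and pin are assumed locals):

    char ev_name[8];	/* "_EXX"/"_LXX" is 4 chars + NUL for pin <= 255 */

    snprintf(ev_name, sizeof(ev_name), "_%c%02X", edge ? 'E' : 'L', pin);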
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ae1ce319cd78..7e5e51d49d09 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -910,7 +910,7 @@ static void of_gpiochip_init_valid_mask(struct gpio_chip *chip)
i, &start);
of_property_read_u32_index(np, "gpio-reserved-ranges",
i + 1, &count);
- if (start >= chip->ngpio || start + count >= chip->ngpio)
+ if (start >= chip->ngpio || start + count > chip->ngpio)
continue;
bitmap_clear(chip->valid_mask, start, count);
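
The one-character fix above matters because a reserved range may end exactly at ngpio: with ngpio = 32, start = 16 and count = 16 cover pins 16..31 and should be cleared from the valid mask, yet the old `start + count >= chip->ngpio` test skipped the entry as out of range.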
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index e59884cc12a7..690035124faa 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1404,6 +1404,16 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct irq_domain *domain = gc->irq.domain;
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+ /*
+ * Avoid a race condition with other code, which tries to look up
+ * an IRQ before the irqchip has been properly registered,
+ * i.e. while gpiochip is still being brought up.
+ */
+ if (!gc->irq.initialized)
+ return -EPROBE_DEFER;
+#endif
+
if (!gpiochip_irqchip_irq_valid(gc, offset))
return -ENXIO;
@@ -1423,19 +1433,21 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
return irq_create_mapping(domain, offset);
}
-static int gpiochip_irq_reqres(struct irq_data *d)
+int gpiochip_irq_reqres(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
return gpiochip_reqres_irq(gc, d->hwirq);
}
+EXPORT_SYMBOL(gpiochip_irq_reqres);
-static void gpiochip_irq_relres(struct irq_data *d)
+void gpiochip_irq_relres(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
gpiochip_relres_irq(gc, d->hwirq);
}
+EXPORT_SYMBOL(gpiochip_irq_relres);
static void gpiochip_irq_mask(struct irq_data *d)
{
@@ -1475,6 +1487,11 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gc)
{
struct irq_chip *irqchip = gc->irq.chip;
+ if (irqchip->flags & IRQCHIP_IMMUTABLE)
+ return;
+
+ chip_warn(gc, "not an immutable chip, please consider fixing it!\n");
+
if (!irqchip->irq_request_resources &&
!irqchip->irq_release_resources) {
irqchip->irq_request_resources = gpiochip_irq_reqres;
@@ -1591,6 +1608,15 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
gpiochip_set_irq_hooks(gc);
+ /*
+ * Use barrier() here to prevent the compiler from reordering the
+ * store to gc->irq.initialized before the initialization of the
+ * GPIO chip irq members above.
+ */
+ barrier();
+
+ gc->irq.initialized = true;
+
acpi_gpiochip_request_interrupts(gc);
return 0;
@@ -1633,7 +1659,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gc)
irq_domain_remove(gc->irq.domain);
}
- if (irqchip) {
+ if (irqchip && !(irqchip->flags & IRQCHIP_IMMUTABLE)) {
if (irqchip->irq_request_resources == gpiochip_irq_reqres) {
irqchip->irq_request_resources = NULL;
irqchip->irq_release_resources = NULL;
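
Taken together, the gpiolib.c hunks above form a publish/consume pair; condensed, with both halves quoted from the hunks:

    /* publisher, end of gpiochip_add_irqchip(): set up everything first */
    gpiochip_set_irq_hooks(gc);
    barrier();			/* keep the flag store after the setup */
    gc->irq.initialized = true;

    /* consumer, gpiochip_to_irq(): defer lookups until publication */
    if (!gc->irq.initialized)
    	return -EPROBE_DEFER;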
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index 5b393622f592..a0f0a17e224f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -119,6 +119,7 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
+#define CONNECTOR_OBJECT_ID_USBC 0x17
/* deleted */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index cdf0818088b3..7606e3b6361e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1342,9 +1342,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 0e12315fa0cb..98ac53ee6bb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1046,6 +1046,20 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
}
/**
+ * amdgpu_acpi_should_gpu_reset
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the GPU should be reset, false if not.
+ */
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+{
+ if (adev->flags & AMD_IS_APU)
+ return false;
+ return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
+}
+
+/**
* amdgpu_acpi_is_s0ix_active
*
* @adev: amdgpu_device_pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 970b065e9a6b..d0d0ea565e3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -128,6 +128,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_chunk;
}
+ mutex_lock(&p->ctx->lock);
+
/* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) {
ret = -ECANCELED;
@@ -709,6 +711,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
dma_fence_put(parser->fence);
if (parser->ctx) {
+ mutex_unlock(&parser->ctx->lock);
amdgpu_ctx_put(parser->ctx);
}
if (parser->bo_list)
@@ -1157,6 +1160,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
{
int i, r;
+ /* TODO: Investigate why we still need the context lock */
+ mutex_unlock(&p->ctx->lock);
+
for (i = 0; i < p->nchunks; ++i) {
struct amdgpu_cs_chunk *chunk;
@@ -1167,32 +1173,34 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
r = amdgpu_cs_process_fence_dep(p, chunk);
if (r)
- return r;
+ goto out;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
if (r)
- return r;
+ goto out;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
if (r)
- return r;
+ goto out;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
if (r)
- return r;
+ goto out;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
if (r)
- return r;
+ goto out;
break;
}
}
- return 0;
+out:
+ mutex_lock(&p->ctx->lock);
+ return r;
}
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
@@ -1368,6 +1376,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
r = amdgpu_cs_submit(&parser, cs);
+
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 5981c7d9bd48..c317078d1afd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -237,6 +237,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
+ mutex_init(&ctx->lock);
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
@@ -295,6 +296,7 @@ static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
{
struct amdgpu_device *adev = ctx->adev;
enum amd_dpm_forced_level level;
+ u32 current_stable_pstate;
int r;
mutex_lock(&adev->pm.stable_pstate_ctx_lock);
@@ -303,6 +305,10 @@ static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
goto done;
}
+ r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
+ if (r || (stable_pstate == current_stable_pstate))
+ goto done;
+
switch (stable_pstate) {
case AMDGPU_CTX_STABLE_PSTATE_NONE:
level = AMD_DPM_FORCED_LEVEL_AUTO;
@@ -357,6 +363,7 @@ static void amdgpu_ctx_fini(struct kref *ref)
drm_dev_exit(idx);
}
+ mutex_destroy(&ctx->lock);
kfree(ctx);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index d0cbfcea90f7..142f2f87d44c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -49,6 +49,7 @@ struct amdgpu_ctx {
bool preamble_presented;
int32_t init_priority;
int32_t override_priority;
+ struct mutex lock;
atomic_t guilty;
unsigned long ras_counter_ce;
unsigned long ras_counter_ue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3987ecb24ef4..49f734137f15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5733,7 +5733,7 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU)
+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
return;
#endif
if (adev->gmc.xgmi.connected_to_cpu)
@@ -5749,7 +5749,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU)
+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
return;
#endif
if (adev->gmc.xgmi.connected_to_cpu)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index bb1c025d9001..46ef57b07c15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -680,7 +680,7 @@ MODULE_PARM_DESC(sched_policy,
* Maximum number of processes that HWS can schedule concurrently. The maximum is the
* number of VMIDs assigned to the HWS, which is also the default.
*/
-int hws_max_conc_proc = 8;
+int hws_max_conc_proc = -1;
module_param(hws_max_conc_proc, int, 0444);
MODULE_PARM_DESC(hws_max_conc_proc,
"Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
@@ -2323,18 +2323,23 @@ static int amdgpu_pmops_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
- int r;
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else
adev->in_s3 = true;
- r = amdgpu_device_suspend(drm_dev, true);
- if (r)
- return r;
- if (!adev->in_s0ix)
- r = amdgpu_asic_reset(adev);
- return r;
+ return amdgpu_device_suspend(drm_dev, true);
+}
+
+static int amdgpu_pmops_suspend_noirq(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ if (amdgpu_acpi_should_gpu_reset(adev))
+ return amdgpu_asic_reset(adev);
+
+ return 0;
}
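+
+/*
+ * Note: .suspend_noirq callbacks run after device interrupts have been
+ * disabled, so moving the ASIC reset here takes it out of the ordinary
+ * .suspend path, and amdgpu_acpi_should_gpu_reset() skips it for APUs
+ * and for suspend-to-idle.
+ */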
static int amdgpu_pmops_resume(struct device *dev)
@@ -2390,6 +2395,71 @@ static int amdgpu_pmops_restore(struct device *dev)
return amdgpu_device_resume(drm_dev, true);
}
+static int amdgpu_runtime_idle_check_display(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ if (adev->mode_info.num_crtc) {
+ struct drm_connector *list_connector;
+ struct drm_connector_list_iter iter;
+ int ret = 0;
+
+ /* XXX: Return busy if any displays are connected, to avoid
+ * possible display wakeups after runtime resume caused by
+ * hotplug events for displays that were connected while
+ * the GPU was suspended. Remove this once that is fixed.
+ */
+ mutex_lock(&drm_dev->mode_config.mutex);
+ drm_connector_list_iter_begin(drm_dev, &iter);
+ drm_for_each_connector_iter(list_connector, &iter) {
+ if (list_connector->status == connector_status_connected) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ mutex_unlock(&drm_dev->mode_config.mutex);
+
+ if (ret)
+ return ret;
+
+ if (amdgpu_device_has_dc_support(adev)) {
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, drm_dev) {
+ drm_modeset_lock(&crtc->mutex, NULL);
+ if (crtc->state->active)
+ ret = -EBUSY;
+ drm_modeset_unlock(&crtc->mutex);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ mutex_lock(&drm_dev->mode_config.mutex);
+ drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
+
+ drm_connector_list_iter_begin(drm_dev, &iter);
+ drm_for_each_connector_iter(list_connector, &iter) {
+ if (list_connector->dpms == DRM_MODE_DPMS_ON) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ drm_connector_list_iter_end(&iter);
+
+ drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
+ mutex_unlock(&drm_dev->mode_config.mutex);
+ }
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2402,6 +2472,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
return -EBUSY;
}
+ ret = amdgpu_runtime_idle_check_display(dev);
+ if (ret)
+ return ret;
+
/* wait for all rings to drain before suspending */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -2511,41 +2585,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
return -EBUSY;
}
- if (amdgpu_device_has_dc_support(adev)) {
- struct drm_crtc *crtc;
-
- drm_for_each_crtc(crtc, drm_dev) {
- drm_modeset_lock(&crtc->mutex, NULL);
- if (crtc->state->active)
- ret = -EBUSY;
- drm_modeset_unlock(&crtc->mutex);
- if (ret < 0)
- break;
- }
-
- } else {
- struct drm_connector *list_connector;
- struct drm_connector_list_iter iter;
-
- mutex_lock(&drm_dev->mode_config.mutex);
- drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
-
- drm_connector_list_iter_begin(drm_dev, &iter);
- drm_for_each_connector_iter(list_connector, &iter) {
- if (list_connector->dpms == DRM_MODE_DPMS_ON) {
- ret = -EBUSY;
- break;
- }
- }
-
- drm_connector_list_iter_end(&iter);
-
- drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
- mutex_unlock(&drm_dev->mode_config.mutex);
- }
-
- if (ret == -EBUSY)
- DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+ ret = amdgpu_runtime_idle_check_display(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
@@ -2575,6 +2615,7 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
.prepare = amdgpu_pmops_prepare,
.complete = amdgpu_pmops_complete,
.suspend = amdgpu_pmops_suspend,
+ .suspend_noirq = amdgpu_pmops_suspend_noirq,
.resume = amdgpu_pmops_resume,
.freeze = amdgpu_pmops_freeze,
.thaw = amdgpu_pmops_thaw,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 8fe939976224..28a736c507bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -266,7 +266,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
* adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe;
- while (queue_bit-- >= 0) {
+ while (--queue_bit >= 0) {
if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
continue;
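
The pre-decrement fix above matters because queue_bit starts at the total number of queue bits, one past the last valid index. A tiny standalone illustration (use_bit() stands in for test_bit()):

    int n = 4;			/* bits 0..3 are valid */
    int i;

    i = n;
    while (i-- >= 0)		/* body runs with i = 3, 2, 1, 0, -1 ... */
    	use_bit(i);		/* ... so the final pass indexes bit -1 */

    i = n;
    while (--i >= 0)		/* body runs with i = 3, 2, 1, 0 */
    	use_bit(i);		/* exactly the valid bits */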
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index ca2cfb65f976..a66a0881a934 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -561,9 +561,15 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
/*
* noretry = 0 will cause kfd page fault tests to fail
* for some ASICs, so set default to 1 for these ASICs.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 25731719c627..940752488330 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1284,6 +1284,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
*/
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct dma_fence *fence = NULL;
struct amdgpu_bo *abo;
int r;
@@ -1303,7 +1304,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
if (bo->resource->mem_type != TTM_PL_VRAM ||
- !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
+ !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
+ adev->in_suspend || adev->shutdown)
return;
if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 5320bb0883d8..317d80209e95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -300,8 +300,8 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned int ring_size, struct amdgpu_irq_src *irq_src,
- unsigned int irq_type, unsigned int prio,
+ unsigned int max_dw, struct amdgpu_irq_src *irq_src,
+ unsigned int irq_type, unsigned int hw_prio,
atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f99093f2ebc7..a0ee828a4a97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -52,7 +52,7 @@
#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
-#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2_vcn.bin"
+#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index e2fde88aaf5e..f06fb7f882e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -159,6 +159,7 @@
#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8)
#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
#define AMDGPU_VCN_FW_LOGGING_FLAG (1 << 10)
+#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001
#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001
@@ -279,6 +280,11 @@ struct amdgpu_fw_shared_fw_logging {
uint32_t size;
};
+struct amdgpu_fw_shared_smu_interface_info {
+ uint8_t smu_interface_type;
+ uint8_t padding[3];
+};
+
struct amdgpu_fw_shared {
uint32_t present_flag_0;
uint8_t pad[44];
@@ -287,6 +293,7 @@ struct amdgpu_fw_shared {
struct amdgpu_fw_shared_multi_queue multi_queue;
struct amdgpu_fw_shared_sw_ring sw_ring;
struct amdgpu_fw_shared_fw_logging fw_log;
+ struct amdgpu_fw_shared_smu_interface_info smu_interface_info;
};
struct amdgpu_vcn_fwlog {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index a025f080aa6a..5e3756643da3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <drm/drm_drv.h>
+#include <xen/xen.h>
#include "amdgpu.h"
#include "amdgpu_ras.h"
@@ -710,7 +711,8 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
if (!reg) {
- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
+ /* passthrough mode excludes sriov mode */
+ if (is_virtual_machine() && !xen_initial_domain())
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f4c6accd3226..9426e252d8aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3293,7 +3293,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3429,7 +3429,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_6[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000042),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3454,7 +3454,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_7[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000041),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -7689,6 +7689,7 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
preempt_disable();
clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 46d4bf27ebbb..b8cfcc6b1125 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1205,6 +1205,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
+ /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
+ { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
{ 0, 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 3c1d440824a7..7c956cf21bc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -814,7 +814,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU) {
+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
@@ -1151,6 +1151,16 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /*
+ * The issue of MMHUB being unable to disconnect from DF with MMHUB clock
+ * gating disabled is a new problem observed on DF 3.0.3; with the same
+ * suspend sequence, no issue is seen on DF 3.0.2 series platforms.
+ */
+ if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) {
+ dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n");
+ return 0;
+ }
+
r = adev->mmhub.funcs->set_clockgating(adev, state);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 344d819b4c1b..979da6f510e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -381,8 +381,9 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU &&
- adev->gmc.real_vram_size > adev->gmc.aper_size) {
+ if ((adev->flags & AMD_IS_APU) &&
+ adev->gmc.real_vram_size > adev->gmc.aper_size &&
+ !amdgpu_passthrough(adev)) {
adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index ca9841d5669f..1932a3e4af7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -581,7 +581,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU) {
+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 431742eb7811..6009fbfdcc19 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1456,7 +1456,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
*/
/* check whether both host-gpu and gpu-gpu xgmi links exist */
- if ((adev->flags & AMD_IS_APU) ||
+ if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
(adev->gmc.xgmi.supported &&
adev->gmc.xgmi.connected_to_cpu)) {
adev->gmc.aper_base =
@@ -1721,7 +1721,7 @@ static int gmc_v9_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
amdgpu_gart_table_vram_free(adev);
- amdgpu_bo_unref(&adev->gmc.pdb0_bo);
+ amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
amdgpu_bo_fini(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index dff54190b96c..f0fbcda76f5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
+#include "amdgpu_cs.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
@@ -1900,6 +1901,75 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.set_powergating_state = vcn_v1_0_set_powergating_state,
};
+/*
+ * It is a hardware issue that VCN can't handle a GTT TMZ buffer on
+ * CHIP_RAVEN series ASICs. Move such a GTT TMZ buffer to the VRAM domain
+ * before command submission as a workaround.
+ */
+static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ uint64_t addr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_bo *bo;
+ int r;
+
+ addr &= AMDGPU_GMC_HOLE_MASK;
+ if (addr & 0x7) {
+ DRM_ERROR("VCN messages must be 8 byte aligned!\n");
+ return -EINVAL;
+ }
+
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, addr/AMDGPU_GPU_PAGE_SIZE);
+ if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
+ return -EINVAL;
+
+ bo = mapping->bo_va->base.bo;
+ if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
+ return 0;
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r) {
+ DRM_ERROR("Failed to validate the VCN message BO (%d)!\n", r);
+ return r;
+ }
+
+ return r;
+}
+
+static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ uint32_t msg_lo = 0, msg_hi = 0;
+ int i, r;
+
+ if (!(ib->flags & AMDGPU_IB_FLAGS_SECURE))
+ return 0;
+
+ for (i = 0; i < ib->length_dw; i += 2) {
+ uint32_t reg = amdgpu_ib_get_value(ib, i);
+ uint32_t val = amdgpu_ib_get_value(ib, i + 1);
+
+ if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+ msg_lo = val;
+ } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+ msg_hi = val;
+ } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) {
+ r = vcn_v1_0_validate_bo(p, job,
+ ((u64)msg_hi) << 32 | msg_lo);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
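+
+/*
+ * The decode IB is a stream of (register, value) pairs: the walk above
+ * latches writes to the DATA0/DATA1 registers as the low/high halves of
+ * the message address, and validates the addressed BO once the CMD
+ * register write arrives.
+ */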
+
static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
@@ -1910,6 +1980,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.get_rptr = vcn_v1_0_dec_ring_get_rptr,
.get_wptr = vcn_v1_0_dec_ring_get_wptr,
.set_wptr = vcn_v1_0_dec_ring_set_wptr,
+ .patch_cs_in_place = vcn_v1_0_ring_patch_cs_in_place,
.emit_frame_size =
6 + 6 + /* hdp invalidate / flush */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index c87263ed20ec..cb5f0a12333f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -219,6 +219,11 @@ static int vcn_v3_0_sw_init(void *handle)
cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
+ fw_shared->present_flag_0 |= AMDGPU_VCN_SMU_VERSION_INFO_FLAG;
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 2))
+ fw_shared->smu_interface_info.smu_interface_type = 2;
+ else if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 1))
+ fw_shared->smu_interface_info.smu_interface_type = 1;
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
@@ -575,8 +580,8 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
/* VCN global tiling registers */
- WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
- UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
@@ -1480,8 +1485,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
+ vcn_v3_0_pause_dpg_mode(adev, inst_idx, &state);
+
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 039b90cdc3bc..45f0188c4273 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -81,6 +81,10 @@
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"
+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#endif
+
#define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
@@ -1134,13 +1138,24 @@ static void vi_enable_aspm(struct amdgpu_device *adev)
WREG32_PCIE(ixPCIE_LC_CNTL, data);
}
+static bool aspm_support_quirk_check(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+#else
+ return true;
+#endif
+}
+
static void vi_program_aspm(struct amdgpu_device *adev)
{
u32 data, data1, orig;
bool bL1SS = false;
bool bClkReqSupport = true;
- if (!amdgpu_device_should_use_aspm(adev))
+ if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
return;
if (adev->flags & AMD_IS_APU ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 339e12c94cff..62aa6c9d5123 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -483,15 +483,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
}
/* Verify module parameters regarding the mapped process number */
- if ((hws_max_conc_proc < 0)
- || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
- dev_err(kfd_device,
- "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
- hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
- kfd->vm_info.vmid_num_kfd);
+ if (hws_max_conc_proc >= 0)
+ kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
+ else
kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
- } else
- kfd->max_proc_per_quantum = hws_max_conc_proc;
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
@@ -536,7 +531,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto kfd_doorbell_error;
}
- kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
+ if (amdgpu_use_xgmi_p2p)
+ kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
kfd->noretry = kfd->adev->gmc.noretry;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index acf4f7975850..198672264492 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -130,19 +130,33 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
}
static void increment_queue_count(struct device_queue_manager *dqm,
- enum kfd_queue_type type)
+ struct qcm_process_device *qpd,
+ struct queue *q)
{
dqm->active_queue_count++;
- if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_DIQ)
dqm->active_cp_queue_count++;
+
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count++;
+ qpd->mapped_gws_queue = true;
+ }
}
static void decrement_queue_count(struct device_queue_manager *dqm,
- enum kfd_queue_type type)
+ struct qcm_process_device *qpd,
+ struct queue *q)
{
dqm->active_queue_count--;
- if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_DIQ)
dqm->active_cp_queue_count--;
+
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
}
/*
@@ -412,7 +426,7 @@ add_queue_to_list:
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active)
- increment_queue_count(dqm, q->properties.type);
+ increment_queue_count(dqm, qpd, q);
/*
* Unconditionally increment this counter, regardless of the queue's
@@ -601,13 +615,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_vmid(dqm, qpd, q);
}
qpd->queue_count--;
- if (q->properties.is_active) {
- decrement_queue_count(dqm, q->properties.type);
- if (q->properties.is_gws) {
- dqm->gws_queue_count--;
- qpd->mapped_gws_queue = false;
- }
- }
+ if (q->properties.is_active)
+ decrement_queue_count(dqm, qpd, q);
return retval;
}
@@ -700,12 +709,11 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
* dqm->active_queue_count to determine whether a new runlist must be
* uploaded.
*/
- if (q->properties.is_active && !prev_active)
- increment_queue_count(dqm, q->properties.type);
- else if (!q->properties.is_active && prev_active)
- decrement_queue_count(dqm, q->properties.type);
-
- if (q->gws && !q->properties.is_gws) {
+ if (q->properties.is_active && !prev_active) {
+ increment_queue_count(dqm, &pdd->qpd, q);
+ } else if (!q->properties.is_active && prev_active) {
+ decrement_queue_count(dqm, &pdd->qpd, q);
+ } else if (q->gws && !q->properties.is_gws) {
if (q->properties.is_active) {
dqm->gws_queue_count++;
pdd->qpd.mapped_gws_queue = true;
@@ -767,11 +775,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
- decrement_queue_count(dqm, q->properties.type);
- if (q->properties.is_gws) {
- dqm->gws_queue_count--;
- qpd->mapped_gws_queue = false;
- }
+ decrement_queue_count(dqm, qpd, q);
if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
continue;
@@ -817,7 +821,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
continue;
q->properties.is_active = false;
- decrement_queue_count(dqm, q->properties.type);
+ decrement_queue_count(dqm, qpd, q);
}
pdd->last_evict_timestamp = get_jiffies_64();
retval = execute_queues_cpsch(dqm,
@@ -888,11 +892,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
- increment_queue_count(dqm, q->properties.type);
- if (q->properties.is_gws) {
- dqm->gws_queue_count++;
- qpd->mapped_gws_queue = true;
- }
+ increment_queue_count(dqm, qpd, q);
if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
continue;
@@ -950,7 +950,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
continue;
q->properties.is_active = true;
- increment_queue_count(dqm, q->properties.type);
+ increment_queue_count(dqm, &pdd->qpd, q);
}
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1378,7 +1378,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->total_queue_count);
list_add(&kq->list, &qpd->priv_queue_list);
- increment_queue_count(dqm, kq->queue->properties.type);
+ increment_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
@@ -1392,7 +1392,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
{
dqm_lock(dqm);
list_del(&kq->list);
- decrement_queue_count(dqm, kq->queue->properties.type);
+ decrement_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
/*
@@ -1467,7 +1467,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
qpd->queue_count++;
if (q->properties.is_active) {
- increment_queue_count(dqm, q->properties.type);
+ increment_queue_count(dqm, qpd, q);
execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1683,15 +1683,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
list_del(&q->list);
qpd->queue_count--;
if (q->properties.is_active) {
- decrement_queue_count(dqm, q->properties.type);
+ decrement_queue_count(dqm, qpd, q);
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
- if (q->properties.is_gws) {
- dqm->gws_queue_count--;
- qpd->mapped_gws_queue = false;
- }
}
/*
@@ -1932,7 +1928,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
list_del(&kq->list);
- decrement_queue_count(dqm, kq->queue->properties.type);
+ decrement_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = false;
dqm->total_queue_count--;
filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1945,13 +1941,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- if (q->properties.is_active) {
- decrement_queue_count(dqm, q->properties.type);
- if (q->properties.is_gws) {
- dqm->gws_queue_count--;
- qpd->mapped_gws_queue = false;
- }
- }
+ if (q->properties.is_active)
+ decrement_queue_count(dqm, qpd, q);
dqm->total_queue_count--;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index deecccebe5b6..64f4a51cc880 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -749,6 +749,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
event_waiters = kmalloc_array(num_events,
sizeof(struct kfd_event_waiter),
GFP_KERNEL);
+ if (!event_waiters)
+ return NULL;
for (i = 0; (event_waiters) && (i < num_events) ; i++) {
init_wait(&event_waiters[i].wait);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 9967a73d5b0f..8f58fc491b28 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1103,7 +1103,7 @@ struct kfd_criu_queue_priv_data {
uint32_t priority;
uint32_t q_percent;
uint32_t doorbell_id;
- uint32_t is_gws;
+ uint32_t gws;
uint32_t sdma_id;
uint32_t eop_ring_buffer_size;
uint32_t ctx_save_restore_area_size;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 6eca9509f2e3..4f58e671d39b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -636,6 +636,8 @@ static int criu_checkpoint_queue(struct kfd_process_device *pdd,
q_data->ctx_save_restore_area_size =
q->properties.ctx_save_restore_area_size;
+ q_data->gws = !!q->gws;
+
ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
if (ret) {
pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
@@ -743,7 +745,6 @@ static void set_queue_properties_from_criu(struct queue_properties *qp,
struct kfd_criu_queue_priv_data *q_data)
{
qp->is_interop = false;
- qp->is_gws = q_data->is_gws;
qp->queue_percent = q_data->q_percent;
qp->priority = q_data->priority;
qp->queue_address = q_data->q_address;
@@ -826,12 +827,15 @@ int kfd_criu_restore_queue(struct kfd_process *p,
NULL);
if (ret) {
pr_err("Failed to create new queue err:%d\n", ret);
- ret = -EINVAL;
+ goto exit;
}
+ if (q_data->gws)
+ ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);
+
exit:
if (ret)
- pr_err("Failed to create queue (%d)\n", ret);
+ pr_err("Failed to restore queue (%d)\n", ret);
else
pr_debug("Queue id %d was restored successfully\n", queue_id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index e4beebb1c80a..f2e1d506ba21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -247,15 +247,6 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
return ret;
}
- ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
- O_RDWR);
- if (ret < 0) {
- kfifo_free(&client->fifo);
- kfree(client);
- return ret;
- }
- *fd = ret;
-
init_waitqueue_head(&client->wait_queue);
spin_lock_init(&client->lock);
client->events = 0;
@@ -265,5 +256,20 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
list_add_rcu(&client->list, &dev->smi_clients);
spin_unlock(&dev->smi_lock);
+ ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
+ O_RDWR);
+ if (ret < 0) {
+ spin_lock(&dev->smi_lock);
+ list_del_rcu(&client->list);
+ spin_unlock(&dev->smi_lock);
+
+ synchronize_rcu();
+
+ kfifo_free(&client->fifo);
+ kfree(client);
+ return ret;
+ }
+ *fd = ret;
+
return 0;
}
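The reordering above publishes the client to the RCU list before the fd is created, so the failure path must unpublish it and wait for readers before freeing. A minimal userspace analogue of that unwind ordering; fake_getfd stands in for anon_inode_getfd(), and the kernel's synchronize_rcu() step is marked by a comment:

    #include <stdlib.h>

    struct client { struct client *next; };
    static struct client *clients;          /* shared list ("published") */

    static int fake_getfd(void) { return -1; }  /* pretend fd creation fails */

    static int open_client(void)
    {
            struct client *c = malloc(sizeof(*c));
            if (!c)
                    return -1;
            c->next = clients;
            clients = c;                    /* publish to readers */

            if (fake_getfd() < 0) {
                    clients = c->next;      /* unpublish before freeing */
                    /* kernel path: synchronize_rcu() waits for readers here */
                    free(c);
                    return -1;
            }
            return 0;
    }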
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b30656959fd8..62139ff35476 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2714,7 +2714,8 @@ static int dm_resume(void *handle)
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
- if (aconnector->mst_port)
+ if (aconnector->dc_link &&
+ aconnector->dc_link->type == dc_connection_mst_branch)
continue;
mutex_lock(&aconnector->hpd_lock);
@@ -3972,7 +3973,7 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
max - min);
}
-static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
int bl_idx,
u32 user_brightness)
{
@@ -4003,7 +4004,8 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
}
- return rc ? 0 : 1;
+ if (rc)
+ dm->actual_brightness[bl_idx] = user_brightness;
}
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
@@ -9947,7 +9949,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
/* restore the backlight level */
for (i = 0; i < dm->num_of_edps; i++) {
if (dm->backlight_dev[i] &&
- (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
+ (dm->actual_brightness[i] != dm->brightness[i]))
amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
}
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6a908d736d6a..7e44b0429448 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -540,6 +540,12 @@ struct amdgpu_display_manager {
* cached backlight values.
*/
u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
+ /**
+ * @actual_brightness:
+ *
+ * last successfully applied backlight values.
+ */
+ u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
};
enum dsc_clock_force_state {
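A small standalone sketch of the requested-vs-applied caching the new actual_brightness field enables: remember the last value the hardware actually accepted, separately from the last value requested, and only reprogram when they differ. The names (bl_state, hw_set_brightness) are illustrative, not the driver's API:

    struct bl_state {
            unsigned int brightness;          /* last requested level */
            unsigned int actual_brightness;   /* last successfully applied level */
    };

    static int hw_set_brightness(unsigned int level) { (void)level; return 1; }

    static void bl_set(struct bl_state *bl, unsigned int level)
    {
            bl->brightness = level;
            if (hw_set_brightness(level))
                    bl->actual_brightness = level;  /* cache only on success */
    }

    static void bl_restore(struct bl_state *bl)
    {
            if (bl->actual_brightness != bl->brightness)
                    bl_set(bl, bl->brightness);     /* reapply the missed update */
    }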
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index dfba6138f538..26feefbb8990 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -374,7 +374,7 @@ void dce_clock_read_ss_info(struct clk_mgr_internal *clk_mgr_dce)
clk_mgr_dce->dprefclk_ss_percentage =
info.spread_spectrum_percentage;
}
- if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss)
+ if (clk_mgr_dce->base.ctx->dc->config.ignore_dpref_ss)
clk_mgr_dce->dprefclk_ss_percentage = 0;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index edda572dc570..8be4c1970628 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -436,57 +436,84 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
struct integrated_info *bios_info,
const DpmClocks_315_t *clock_table)
{
- int i, j;
+ int i;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
- uint32_t max_dispclk = 0, max_dppclk = 0;
-
- j = -1;
-
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
-
- /* Find lowest DPM, FCLK is filled in reverse order*/
-
- for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
- if (clock_table->DfPstateTable[i].FClk != 0) {
- j = i;
- break;
+ uint32_t max_dispclk, max_dppclk, max_pstate, max_socclk, max_fclk = 0, min_pstate = 0;
+ struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+
+ max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
+ max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
+ max_socclk = find_max_clk_value(clock_table->SocClocks, clock_table->NumSocClkLevelsEnabled);
+
+ /* Find highest fclk pstate */
+ for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
+ if (clock_table->DfPstateTable[i].FClk > max_fclk) {
+ max_fclk = clock_table->DfPstateTable[i].FClk;
+ max_pstate = i;
}
}
- if (j == -1) {
- /* clock table is all 0s, just use our own hardcode */
- ASSERT(0);
- return;
- }
-
- bw_params->clk_table.num_entries = j + 1;
-
- /* dispclk and dppclk can be max at any voltage, same number of levels for both */
- if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
- clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
- max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
- max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
- } else {
- ASSERT(0);
- }
+ /* For 315 we want to base the clock table on dcfclk; we need at least one entry regardless of the pmfw table */
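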
+ for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+ int j;
+ uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
- for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
- int temp;
+ for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
+ if (clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]
+ && clock_table->DfPstateTable[j].FClk < min_fclk) {
+ min_fclk = clock_table->DfPstateTable[j].FClk;
+ min_pstate = j;
+ }
+ }
- bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
- bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
- bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
+ bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+ bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
bw_params->clk_table.entries[i].wck_ratio = 1;
- temp = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
- if (temp)
- bw_params->clk_table.entries[i].dcfclk_mhz = temp;
- temp = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
- if (temp)
- bw_params->clk_table.entries[i].socclk_mhz = temp;
+ };
+
+ /* Make sure to include at least one entry and the highest pstate */
+ if (max_pstate != min_pstate) {
+ bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(
+ clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[max_pstate].Voltage);
+ bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(
+ clock_table, clock_table->SocClocks, clock_table->DfPstateTable[max_pstate].Voltage);
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = 1;
+ i++;
}
+ bw_params->clk_table.num_entries = i;
+
+ /* Include highest socclk */
+ if (bw_params->clk_table.entries[i-1].socclk_mhz < max_socclk)
+ bw_params->clk_table.entries[i-1].socclk_mhz = max_socclk;
+ /* Set any zero clocks to the default maximum. Not an issue for
+ * power, since we aren't doing any switching in that case anyway.
+ */
+ for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+ if (!bw_params->clk_table.entries[i].fclk_mhz) {
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+ bw_params->clk_table.entries[i].voltage = def_max.voltage;
+ }
+ if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+ bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz)
+ bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+ if (!bw_params->clk_table.entries[i].dispclk_mhz)
+ bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+ if (!bw_params->clk_table.entries[i].dppclk_mhz)
+ bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+ }
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
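For reference, a standalone sketch of what a helper like find_max_clk_value(), used throughout the hunk above, plausibly does — scan a clock table and return the largest entry. The name find_max_u32 is hypothetical:

    #include <stdint.h>

    static uint32_t find_max_u32(const uint32_t *clocks, int n)
    {
            uint32_t max = 0;
            for (int i = 0; i < n; i++)
                    if (clocks[i] > max)
                            max = clocks[i];
            return max;
    }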
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 880ffea2afc6..2600313fea57 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -80,8 +80,8 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define VBIOSSMC_MSG_SetDppclkFreq 0x06 ///< Set DPP clock frequency in MHZ
#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x07 ///< Set DCF clock frequency hard min in MHZ
#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x08 ///< Set DCF clock minimum frequency in deep sleep in MHZ
-#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq 0x09 ///< Set display phy clock frequency in MHZ in case VMIN does not support phy frequency
-#define VBIOSSMC_MSG_GetFclkFrequency 0x0A ///< Get FCLK frequency, return frequemcy in MHZ
+#define VBIOSSMC_MSG_GetDtbclkFreq 0x09 ///< Get display dtb clock frequency in MHZ
+#define VBIOSSMC_MSG_SetDtbClk 0x0A ///< Set dtb clock frequency in MHZ
#define VBIOSSMC_MSG_SetDisplayCount 0x0B ///< Inform PMFW of number of display connected
#define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0x0C ///< To ask PMFW turn off TMDP 48MHz refclk during display off to save power
#define VBIOSSMC_MSG_UpdatePmeRestore 0x0D ///< To ask PMFW to write into Azalia for PME wake up event
@@ -324,15 +324,26 @@ int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr)
return (dprefclk_get_mhz * 1000);
}
-int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr)
+int dcn315_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr)
{
int fclk_get_mhz = -1;
if (clk_mgr->smu_present) {
fclk_get_mhz = dcn315_smu_send_msg_with_param(
clk_mgr,
- VBIOSSMC_MSG_GetFclkFrequency,
+ VBIOSSMC_MSG_GetDtbclkFreq,
0);
}
return (fclk_get_mhz * 1000);
}
+
+void dcn315_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
+{
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn315_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDtbClk,
+ enable);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h
index 66fa42f8dd18..5aa3275ac7d8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h
@@ -37,6 +37,7 @@
#define NUM_SOC_VOLTAGE_LEVELS 4
#define NUM_DF_PSTATE_LEVELS 4
+
typedef struct {
uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz)
uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz)
@@ -124,5 +125,6 @@ void dcn315_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
void dcn315_smu_request_voltage_via_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
void dcn315_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr);
-int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr);
+int dcn315_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr);
+void dcn315_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable);
#endif /* DAL_DC_315_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 702d00ce7da4..3121dd2d2a91 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -686,8 +686,8 @@ void dcn316_clk_mgr_construct(
clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base);
clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
dce_clock_read_ss_info(&clk_mgr->base);
- clk_mgr->base.dccg->ref_dtbclk_khz =
- dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
+ /*clk_mgr->base.dccg->ref_dtbclk_khz =
+ dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/
clk_mgr->base.base.bw_params = &dcn316_bw_params;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index f6e19efea756..c436db416708 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2389,6 +2389,8 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->mst_bw_update)
su_flags->bits.mst_bw = 1;
+ if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
+ su_flags->bits.crtc_timing_adjust = 1;
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
@@ -2650,6 +2652,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_infopacket)
stream->vrr_infopacket = *update->vrr_infopacket;
+ if (update->crtc_timing_adjust)
+ stream->adjust = *update->crtc_timing_adjust;
+
if (update->dpms_off)
stream->dpms_off = *update->dpms_off;
@@ -4051,3 +4056,17 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}
+/*
+ * dc_extended_blank_supported: Decide whether extended blank is supported
+ *
+ * Extended blank is a freesync optimization feature to be enabled in the future.
+ * During the extra vblank period gained from freesync, we have the ability to enter z9/z10.
+ *
+ * @param [in] dc: Current DC state
+ * @return: Indicate whether extended blank is supported (true or false)
+ */
+bool dc_extended_blank_supported(struct dc *dc)
+{
+ return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
+ && dc->caps.zstate_support && dc->caps.is_apu;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index cb87dd643180..bbaa5abdf888 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -983,8 +983,7 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
destrictive = false;
}
}
- } else if (dc_is_hdmi_signal(link->local_sink->sink_signal))
- destrictive = true;
+ }
return destrictive;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 351081f574cb..95b5b5bfa1ff 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -4440,7 +4440,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video
&dpcd_pattern_type.value,
sizeof(dpcd_pattern_type));
- channel_count = dpcd_test_mode.bits.channel_count + 1;
+ channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT);
// read pattern periods for requested channels when sawTooth pattern is requested
if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
@@ -5216,6 +5216,62 @@ static void retrieve_cable_id(struct dc_link *link)
&link->dpcd_caps.cable_id, &usbc_cable_id);
}
+/* DPRX may take some time to respond to AUX messages after HPD is asserted.
+ * If an AUX read is unsuccessful, try to wake the unresponsive DPRX by toggling DPCD SET_POWER (0x600).
+ */
+static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout_ms)
+{
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ uint8_t dpcd_data = 0;
+ uint64_t start_ts = 0;
+ uint64_t current_ts = 0;
+ uint64_t time_taken_ms = 0;
+ enum dc_connection_type type = dc_connection_none;
+
+ status = core_link_read_dpcd(
+ link,
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+ &dpcd_data,
+ sizeof(dpcd_data));
+
+ if (status != DC_OK) {
+ DC_LOG_WARNING("%s: Read DPCD LTTPR_CAP failed - try to toggle DPCD SET_POWER for %lld ms.",
+ __func__,
+ timeout_ms);
+ start_ts = dm_get_timestamp(link->ctx);
+
+ do {
+ if (!dc_link_detect_sink(link, &type) || type == dc_connection_none)
+ break;
+
+ dpcd_data = DP_SET_POWER_D3;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_data,
+ sizeof(dpcd_data));
+
+ dpcd_data = DP_SET_POWER_D0;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_data,
+ sizeof(dpcd_data));
+
+ current_ts = dm_get_timestamp(link->ctx);
+ time_taken_ms = div_u64(dm_get_elapse_time_in_ns(link->ctx, current_ts, start_ts), 1000000);
+ } while (status != DC_OK && time_taken_ms < timeout_ms);
+
+ DC_LOG_WARNING("%s: DPCD SET_POWER %s after %lld ms%s",
+ __func__,
+ (status == DC_OK) ? "succeeded" : "failed",
+ time_taken_ms,
+ (type == dc_connection_none) ? ". Unplugged." : ".");
+ }
+
+ return status;
+}
+
static bool retrieve_link_cap(struct dc_link *link)
{
/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
@@ -5251,6 +5307,15 @@ static bool retrieve_link_cap(struct dc_link *link)
dc_link_aux_try_to_configure_timeout(link->ddc,
LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+ /* Try to ensure AUX channel active before proceeding. */
+ if (link->dc->debug.aux_wake_wa.bits.enable_wa) {
+ uint64_t timeout_ms = link->dc->debug.aux_wake_wa.bits.timeout_ms;
+
+ if (link->dc->debug.aux_wake_wa.bits.use_default_timeout)
+ timeout_ms = LINK_AUX_WAKE_TIMEOUT_MS;
+ status = wa_try_to_wake_dprx(link, timeout_ms);
+ }
+
is_lttpr_present = dp_retrieve_lttpr_cap(link);
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 7af153434e9e..d251c3f3a714 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1685,8 +1685,8 @@ bool dc_is_stream_unchanged(
if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
return false;
- // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
- if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
+ /*compare audio info*/
+ if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
return false;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 4ffab7bb1098..9e79f60e6129 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -188,6 +188,7 @@ struct dc_caps {
bool psp_setup_panel_mode;
bool extended_aux_timeout_support;
bool dmcub_support;
+ bool zstate_support;
uint32_t num_of_internal_disp;
enum dp_protocol_version max_dp_protocol_version;
unsigned int mall_size_per_mem_channel;
@@ -339,6 +340,7 @@ struct dc_config {
bool is_asymmetric_memory;
bool is_single_rank_dimm;
bool use_pipe_ctx_sync_logic;
+ bool ignore_dpref_ss;
};
enum visual_confirm {
@@ -525,6 +527,22 @@ union dpia_debug_options {
uint32_t raw;
};
+/* AUX wake workaround options
+ * bit 0: enable/disable the workaround
+ * bit 1: use the default timeout LINK_AUX_WAKE_TIMEOUT_MS
+ * bits 2-15: reserved
+ * bits 16-31: timeout in ms
+ */
+union aux_wake_wa_options {
+ struct {
+ uint32_t enable_wa : 1;
+ uint32_t use_default_timeout : 1;
+ uint32_t rsvd: 14;
+ uint32_t timeout_ms : 16;
+ } bits;
+ uint32_t raw;
+};
+
struct dc_debug_data {
uint32_t ltFailCount;
uint32_t i2cErrorCount;
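A compilable example of the bit-packed option word defined above. The layout mirrors the union verbatim, but bitfield ordering is implementation-defined, so treat this as illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    union aux_wake_wa_options {
            struct {
                    uint32_t enable_wa : 1;
                    uint32_t use_default_timeout : 1;
                    uint32_t rsvd : 14;
                    uint32_t timeout_ms : 16;
            } bits;
            uint32_t raw;
    };

    int main(void)
    {
            union aux_wake_wa_options wa = { .raw = 0 };

            wa.bits.enable_wa = 1;
            wa.bits.timeout_ms = 700;   /* overrides the 1500 ms default */
            printf("raw = 0x%08x\n", wa.raw);
            return 0;
    }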
@@ -703,14 +721,15 @@ struct dc_debug_options {
bool enable_driver_sequence_debug;
enum det_size crb_alloc_policy;
int crb_alloc_policy_min_disp_count;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool disable_z10;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
bool enable_z9_disable_interface;
bool enable_sw_cntl_psr;
union dpia_debug_options dpia_debug;
#endif
bool apply_vendor_specific_lttpr_wa;
- bool ignore_dpref_ss;
+ bool extended_blank_optimization;
+ union aux_wake_wa_options aux_wake_wa;
uint8_t psr_power_use_phy_fsm;
};
@@ -1369,6 +1388,8 @@ struct dc_sink_init_data {
bool converter_disable_audio;
};
+bool dc_extended_blank_supported(struct dc *dc);
+
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
/* Newer interfaces */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 99a750f561f8..c4168c11257c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -131,6 +131,7 @@ union stream_update_flags {
uint32_t wb_update:1;
uint32_t dsc_changed : 1;
uint32_t mst_bw : 1;
+ uint32_t crtc_timing_adjust : 1;
} bits;
uint32_t raw;
@@ -289,6 +290,7 @@ struct dc_stream_update {
struct dc_3dlut *lut3d_func;
struct test_pattern *pending_test_pattern;
+ struct dc_crtc_timing_adjust *crtc_timing_adjust;
};
bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index c3e141c19a77..83fbea2df410 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1497,16 +1497,12 @@ void dcn10_init_hw(struct dc *dc)
link->link_status.link_active = true;
}
- /* Power gate DSCs */
- if (!is_optimized_init_done) {
- for (i = 0; i < res_pool->res_cap->num_dsc; i++)
- if (hws->funcs.dsc_pg_control != NULL)
- hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
- }
-
/* we want to turn off all dp displays before doing detection */
dc_link_blank_all_dp_displays(dc);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -1559,8 +1555,6 @@ void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
@@ -2056,7 +2050,7 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
{
struct dc_context *dc_ctx = dc->ctx;
int i, master = -1, embedded = -1;
- struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0};
+ struct dc_crtc_timing *hw_crtc_timing;
uint64_t phase[MAX_PIPES];
uint64_t modulo[MAX_PIPES];
unsigned int pclk;
@@ -2067,6 +2061,10 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
uint32_t dp_ref_clk_100hz =
dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
+ hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
+ if (!hw_crtc_timing)
+ return master;
+
if (dc->config.vblank_alignment_dto_params &&
dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
embedded_h_total =
@@ -2130,6 +2128,8 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
}
}
+
+ kfree(hw_crtc_timing);
return master;
}
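The hunk above moves a MAX_PIPES-sized timing array off the stack onto the heap, bailing out early if the allocation fails. A minimal standalone analogue of that change; struct crtc_timing and scan_timings are hypothetical:

    #include <stdlib.h>

    #define MAX_PIPES 6
    struct crtc_timing { unsigned int h_total, v_total; };

    static int scan_timings(void)
    {
            struct crtc_timing *t = calloc(MAX_PIPES, sizeof(*t));
            if (!t)
                    return -1;              /* mirrors the early "return master" */
            /* ... inspect t[0..MAX_PIPES-1] ... */
            free(t);
            return 0;
    }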
@@ -2522,14 +2522,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
- if (per_pixel_alpha)
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
- else
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
+ if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else if (per_pixel_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index ab910deed481..b627c41713cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1857,6 +1857,7 @@ void dcn20_optimize_bandwidth(
struct dc_state *context)
{
struct hubbub *hubbub = dc->res_pool->hubbub;
+ int i;
/* program dchubbub watermarks */
hubbub->funcs->program_watermarks(hubbub,
@@ -1873,6 +1874,17 @@ void dcn20_optimize_bandwidth(
dc->clk_mgr,
context,
true);
+ if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+ for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
+ && pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
+ && pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
+ pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
+ pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
+ }
+ }
/* increase compbuf size */
if (hubbub->funcs->program_compbuf_size)
hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
@@ -2332,14 +2344,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
- if (per_pixel_alpha)
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
- else
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
+ if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else if (per_pixel_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index d473708d5399..7802d603f796 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1976,7 +1976,6 @@ int dcn20_validate_apply_pipe_split_flags(
/*If need split for odm but 4 way split already*/
if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
|| !pipe->next_odm_pipe)) {
- ASSERT(0); /* NOT expected yet */
merge[i] = true; /* 4 -> 2 ODM */
} else if (split[i] == 0 && pipe->prev_odm_pipe) {
ASSERT(0); /* NOT expected yet */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 612732656772..faab59508d82 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -644,7 +644,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.min_disp_clk_khz = 100000,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -997,6 +997,7 @@ static struct clock_source *dcn21_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index ed0a0e5fd805..f61ec8763844 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -547,6 +547,9 @@ void dcn30_init_hw(struct dc *dc)
/* we want to turn off all dp displays before doing detection */
dc_link_blank_all_dp_displays(dc);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -624,8 +627,6 @@ void dcn30_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 3e6d6ebd199e..51c5f3685470 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -1042,5 +1042,7 @@ void hubbub31_construct(struct dcn20_hubbub *hubbub31,
hubbub31->detile_buf_size = det_size_kb * 1024;
hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024;
hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB;
+
+ hubbub31->debug_test_index_pstate = 0x6;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
index 53b792b997b7..8ae6117953ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
@@ -54,6 +54,13 @@ void hubp31_soft_reset(struct hubp *hubp, bool reset)
REG_UPDATE(DCHUBP_CNTL, HUBP_SOFT_RESET, reset);
}
+void hubp31_program_extended_blank(struct hubp *hubp, unsigned int min_dst_y_next_start_optimized)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_SET(BLANK_OFFSET_1, 0, MIN_DST_Y_NEXT_START, min_dst_y_next_start_optimized);
+}
+
static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -80,6 +87,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.set_unbounded_requesting = hubp31_set_unbounded_requesting,
.hubp_soft_reset = hubp31_soft_reset,
.hubp_in_blank = hubp1_in_blank,
+ .program_extended_blank = hubp31_program_extended_blank,
};
bool hubp31_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 4be228680909..631d8ac63aa4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -199,6 +199,9 @@ void dcn31_init_hw(struct dc *dc)
/* we want to turn off all dp displays before doing detection */
dc_link_blank_all_dp_displays(dc);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -248,8 +251,6 @@ void dcn31_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
@@ -338,20 +339,20 @@ void dcn31_enable_power_gating_plane(
bool enable)
{
bool force_on = true; /* disable power gating */
+ uint32_t org_ip_request_cntl = 0;
if (enable && !hws->ctx->dc->debug.disable_hubp_power_gate)
force_on = false;
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
/* DCHUBP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
/* DPP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
force_on = true; /* disable power gating */
if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
@@ -359,11 +360,11 @@ void dcn31_enable_power_gating_plane(
/* DCS0/1/2/3/4/5 */
REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index d7559e5a99ce..e708f07fe75a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -153,9 +153,4 @@ void dcn31_hw_sequencer_construct(struct dc *dc)
dc->hwss.init_hw = dcn20_fpga_init_hw;
dc->hwseq->funcs.init_pipes = NULL;
}
- if (dc->debug.disable_z10) {
- /*hw not support z10 or sw disable it*/
- dc->hwss.z10_restore = NULL;
- dc->hwss.z10_save_init = NULL;
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index 8afe2130d7c5..e05527a3a8ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -124,7 +124,6 @@ static bool optc31_enable_crtc(struct timing_generator *optc)
static bool optc31_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
/* disable otg request until end of the first line
* in the vertical blank region
*/
@@ -138,6 +137,7 @@ static bool optc31_disable_crtc(struct timing_generator *optc)
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
1, 100000);
+ optc1_clear_optc_underflow(optc);
return true;
}
@@ -158,6 +158,9 @@ static bool optc31_immediate_disable_crtc(struct timing_generator *optc)
OTG_BUSY, 0,
1, 100000);
+ /* clear the false state */
+ optc1_clear_optc_underflow(optc);
+
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 89b7b6b7254a..63934ecf6be8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -2032,7 +2032,9 @@ bool dcn31_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ DC_FP_END();
// Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
if (pipe_cnt == 0)
@@ -2232,6 +2234,7 @@ static bool dcn31_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
+ dc->caps.zstate_support = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 2f6122153bdb..f93af45aeab4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -722,8 +722,10 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
{
int plane_count;
int i;
+ unsigned int optimized_min_dst_y_next_start_us;
plane_count = 0;
+ optimized_min_dst_y_next_start_us = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
@@ -744,11 +746,22 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
+ if (dc_extended_blank_supported(dc)) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream == context->streams[0]
+ && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min == context->res_ctx.pipe_ctx[i].stream->adjust.v_total_max
+ && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min > context->res_ctx.pipe_ctx[i].stream->timing.v_total) {
+ optimized_min_dst_y_next_start_us =
+ context->res_ctx.pipe_ctx[i].dlg_regs.optimized_min_dst_y_next_start_us;
+ break;
+ }
+ }
+ }
/* zstate only supported on PWRSEQ0 and when there's <2 planes*/
if (link->link_index != 0 || stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
- if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+ if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !dc->debug.disable_psr)
return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
@@ -786,8 +799,6 @@ void dcn20_calculate_dlg_params(
!= dm_dram_clock_change_unsupported;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
- context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
-
context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
@@ -843,6 +854,7 @@ void dcn20_calculate_dlg_params(
&pipes[pipe_idx].pipe);
pipe_idx++;
}
+ context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
}
static void swizzle_to_dml_params(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index e0fecf127bd5..53d760e169e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -1055,6 +1055,7 @@ static void dml_rq_dlg_get_dlg_params(
float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
+ int blank_lines;
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
@@ -1080,6 +1081,18 @@ static void dml_rq_dlg_get_dlg_params(
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
+ blank_lines = (dst->vblank_end + dst->vtotal_min - dst->vblank_start - dst->vstartup_start - 1);
+ if (blank_lines < 0)
+ blank_lines = 0;
+ if (blank_lines != 0) {
+ disp_dlg_regs->optimized_min_dst_y_next_start_us =
+ ((unsigned int) blank_lines * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
+ disp_dlg_regs->optimized_min_dst_y_next_start =
+ (unsigned int)(((double) (dlg_vblank_start + blank_lines)) * dml_pow(2, 2));
+ } else {
+ // use unoptimized value
+ disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
+ }
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);
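A worked numeric example of the optimized_min_dst_y_next_start_us arithmetic above, with illustrative values (100 extra blank lines, 1920 active pixels per line, 148 MHz pixel rate):

    #include <stdio.h>

    int main(void)
    {
            unsigned int blank_lines = 100;     /* illustrative values */
            unsigned int hactive = 1920;
            unsigned int pixel_rate_mhz = 148;  /* pixels per microsecond */

            unsigned int us = blank_lines * hactive / pixel_rate_mhz;
            printf("optimized_min_dst_y_next_start_us = %u\n", us); /* ~1297 */
            return 0;
    }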
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 59f0a61c33cf..2df660cd8801 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -446,6 +446,8 @@ struct _vcs_dpi_display_dlg_regs_st {
unsigned int refcyc_h_blank_end;
unsigned int dlg_vblank_end;
unsigned int min_dst_y_next_start;
+ unsigned int optimized_min_dst_y_next_start;
+ unsigned int optimized_min_dst_y_next_start_us;
unsigned int refcyc_per_htotal;
unsigned int refcyc_x_after_scaler;
unsigned int dst_y_after_scaler;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index efc2339f1fa0..4385d19bc489 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -864,11 +864,11 @@ static bool setup_dsc_config(
min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
}
+ is_dsc_possible = (min_slices_h <= max_slices_h);
+
if (pic_width % min_slices_h != 0)
min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
- is_dsc_possible = (min_slices_h <= max_slices_h);
-
if (min_slices_h == 0 && max_slices_h == 0)
is_dsc_possible = false;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index ab9939db8cea..44f167d2584f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -33,6 +33,7 @@
#define MAX_MTP_SLOT_COUNT 64
#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
#define TRAINING_AUX_RD_INTERVAL 100 //us
+#define LINK_AUX_WAKE_TIMEOUT_MS 1500 // Timeout when trying to wake unresponsive DPRX.
struct dc_link;
struct dc_stream_state;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index e45b7993c5c5..ad69d78c4ac3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -195,6 +195,9 @@ struct hubp_funcs {
void (*hubp_set_flip_int)(struct hubp *hubp);
+ void (*program_extended_blank)(struct hubp *hubp,
+ unsigned int min_dst_y_next_start_optimized);
+
void (*hubp_wait_pipe_read_start)(struct hubp *hubp);
};
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index b691aa45e84f..79bc207415bc 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -100,7 +100,8 @@ enum vsc_packet_revision {
//PB7 = MD0
#define MASK_VTEM_MD0__VRR_EN 0x01
#define MASK_VTEM_MD0__M_CONST 0x02
-#define MASK_VTEM_MD0__RESERVED2 0x0C
+#define MASK_VTEM_MD0__QMS_EN 0x04
+#define MASK_VTEM_MD0__RESERVED2 0x08
#define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0
//MD1
@@ -109,7 +110,7 @@ enum vsc_packet_revision {
//MD2
#define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03
#define MASK_VTEM_MD2__RB 0x04
-#define MASK_VTEM_MD2__RESERVED3 0xF8
+#define MASK_VTEM_MD2__NEXT_TFR 0xF8
//MD3
#define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF
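A small standalone example of consuming the renamed MD0 masks above; the helper names are hypothetical:

    #include <stdint.h>

    #define MASK_VTEM_MD0__VRR_EN        0x01
    #define MASK_VTEM_MD0__M_CONST       0x02
    #define MASK_VTEM_MD0__QMS_EN        0x04
    #define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0

    static inline int vtem_qms_en(uint8_t md0)
    {
            return (md0 & MASK_VTEM_MD0__QMS_EN) != 0;
    }

    static inline uint8_t vtem_fva_factor_m1(uint8_t md0)
    {
            return (md0 & MASK_VTEM_MD0__FVA_FACTOR_M1) >> 4;
    }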
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 89fbee568be4..72e7b5d40af6 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -173,6 +173,17 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
return false;
+ /* Don't use baco for reset in S3.
+ * This is a workaround for some platforms
+ * where entering BACO during suspend
+ * seems to cause reboots or hangs.
+ * This might be related to the fact that BACO controls
+ * power to the whole GPU including devices like audio and USB.
+ * Powering down/up everything may adversely affect these other
+ * devices. Needs more investigation.
+ */
+ if (adev->in_s3)
+ return false;
mutex_lock(&adev->pm.mutex);
@@ -416,6 +427,7 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int i;
if (!adev->pm.dpm_enabled)
return;
@@ -423,6 +435,15 @@ void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
if (!pp_funcs->pm_compute_clocks)
return;
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_bandwidth_update(adev);
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->sched.ready)
+ amdgpu_fence_wait_empty(ring);
+ }
+
mutex_lock(&adev->pm.mutex);
pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
mutex_unlock(&adev->pm.mutex);
@@ -432,6 +453,20 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
+ if (adev->family == AMDGPU_FAMILY_SI) {
+ mutex_lock(&adev->pm.mutex);
+ if (enable) {
+ adev->pm.dpm.uvd_active = true;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+ } else {
+ adev->pm.dpm.uvd_active = false;
+ }
+ mutex_unlock(&adev->pm.mutex);
+
+ amdgpu_dpm_compute_clocks(adev);
+ return;
+ }
+
ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
if (ret)
DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
@@ -442,6 +477,21 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
+ if (adev->family == AMDGPU_FAMILY_SI) {
+ mutex_lock(&adev->pm.mutex);
+ if (enable) {
+ adev->pm.dpm.vce_active = true;
+ /* XXX select vce level based on ring/task */
+ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+ } else {
+ adev->pm.dpm.vce_active = false;
+ }
+ mutex_unlock(&adev->pm.mutex);
+
+ amdgpu_dpm_compute_clocks(adev);
+ return;
+ }
+
ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
if (ret)
DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
@@ -500,6 +550,9 @@ int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
mutex_lock(&adev->pm.mutex);
ret = smu_send_hbm_bad_pages_num(smu, size);
mutex_unlock(&adev->pm.mutex);
@@ -512,6 +565,9 @@ int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t si
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
mutex_lock(&adev->pm.mutex);
ret = smu_send_hbm_bad_channel_flag(smu, size);
mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index 9613c6181c17..d3fe149d8476 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -1028,16 +1028,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_legacy_dpm_compute_clocks(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int i = 0;
-
- if (adev->mode_info.num_crtc)
- amdgpu_display_bandwidth_update(adev);
-
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->sched.ready)
- amdgpu_fence_wait_empty(ring);
- }
amdgpu_dpm_get_active_displays(adev);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index caae54487f9c..633dab14f51c 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -3892,40 +3892,6 @@ static int si_set_boot_state(struct amdgpu_device *adev)
}
#endif
-static int si_set_powergating_by_smu(void *handle,
- uint32_t block_type,
- bool gate)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- switch (block_type) {
- case AMD_IP_BLOCK_TYPE_UVD:
- if (!gate) {
- adev->pm.dpm.uvd_active = true;
- adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
- } else {
- adev->pm.dpm.uvd_active = false;
- }
-
- amdgpu_legacy_dpm_compute_clocks(handle);
- break;
- case AMD_IP_BLOCK_TYPE_VCE:
- if (!gate) {
- adev->pm.dpm.vce_active = true;
- /* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
- } else {
- adev->pm.dpm.vce_active = false;
- }
-
- amdgpu_legacy_dpm_compute_clocks(handle);
- break;
- default:
- break;
- }
- return 0;
-}
-
static int si_set_sw_state(struct amdgpu_device *adev)
{
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
@@ -8125,7 +8091,6 @@ static const struct amd_pm_funcs si_dpm_funcs = {
.print_power_state = &si_dpm_print_power_state,
.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
.force_performance_level = &si_dpm_force_performance_level,
- .set_powergating_by_smu = &si_set_powergating_by_smu,
.vblank_too_short = &si_dpm_vblank_too_short,
.set_fan_control_mode = &si_dpm_set_fan_control_mode,
.get_fan_control_mode = &si_dpm_get_fan_control_mode,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index a2da46bf3985..71e9c6ce6b1a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1487,16 +1487,6 @@ static void pp_pm_compute_clocks(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
struct amdgpu_device *adev = hwmgr->adev;
- int i = 0;
-
- if (adev->mode_info.num_crtc)
- amdgpu_display_bandwidth_update(adev);
-
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->sched.ready)
- amdgpu_fence_wait_empty(ring);
- }
if (!amdgpu_device_has_dc_support(adev)) {
amdgpu_dpm_get_active_displays(adev);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 9ddd8491ff00..ede71de2343d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -773,13 +773,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
hwmgr->display_config->num_display > 3 ?
- data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk :
+ (data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) :
min_mclk,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk,
+ data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
@@ -792,11 +792,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk,
+ data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk,
+ data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index f1544755d8b4..f10a0256413e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1351,14 +1351,8 @@ static int smu_disable_dpms(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- /*
- * TODO: (adev->in_suspend && !adev->in_s0ix) is added to pair
- * the workaround which always reset the asic in suspend.
- * It's likely that workaround will be dropped in the future.
- * Then the change here should be dropped together.
- */
bool use_baco = !smu->is_apu &&
- (((amdgpu_in_reset(adev) || (adev->in_suspend && !adev->in_s0ix)) &&
+ ((amdgpu_in_reset(adev) &&
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 7bfac029e513..b81711c4ff33 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -991,7 +991,7 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
return -EINVAL;
}
- if (sclk_min && sclk_max) {
+ if (sclk_min && sclk_max && smu_v13_0_5_clk_dpm_is_enabled(smu, SMU_SCLK)) {
ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
SMU_SCLK,
sclk_min,
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 007e5a282f67..2145b08f9534 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -78,6 +78,7 @@ config DRM_ITE_IT6505
tristate "ITE IT6505 DisplayPort bridge"
depends on OF
select DRM_KMS_HELPER
+ select DRM_DP_HELPER
select EXTCON
help
ITE IT6505 DisplayPort bridge chip driver.
diff --git a/drivers/gpu/drm/dp/drm_dp_mst_topology.c b/drivers/gpu/drm/dp/drm_dp_mst_topology.c
index 11300b53d24f..7a7cc44686f9 100644
--- a/drivers/gpu/drm/dp/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/dp/drm_dp_mst_topology.c
@@ -4852,6 +4852,7 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
drm_edid_get_monitor_name(mst_edid, name, namelen);
+ kfree(mst_edid);
}
/**
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 026e4e29a0f3..9a2cfab3a177 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -244,21 +244,6 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
if (panel)
*panel = NULL;
- /**
- * Devices can also be child nodes when we also control that device
- * through the upstream device (ie, MIPI-DCS for a MIPI-DSI device).
- *
- * Lookup for a child node of the given parent that isn't either port
- * or ports.
- */
- for_each_available_child_of_node(np, remote) {
- if (of_node_name_eq(remote, "port") ||
- of_node_name_eq(remote, "ports"))
- continue;
-
- goto of_find_panel_or_bridge;
- }
-
/*
* of_graph_get_remote_node() produces a noisy error message if port
* node isn't found and the absence of the port is a legit case here,
@@ -269,8 +254,6 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
return -ENODEV;
remote = of_graph_get_remote_node(np, port, endpoint);
-
-of_find_panel_or_bridge:
if (!remote)
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 7616a3906b9e..1b774dcfb281 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -367,6 +367,44 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
}
}
+static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
+ const u32 *mmioaddr, u32 mmio_count,
+ int header_ver, u8 dmc_id)
+{
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ u32 start_range, end_range;
+ int i;
+
+ if (dmc_id >= DMC_FW_MAX) {
+ drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
+ return false;
+ }
+
+ if (header_ver == 1) {
+ start_range = DMC_MMIO_START_RANGE;
+ end_range = DMC_MMIO_END_RANGE;
+ } else if (dmc_id == DMC_FW_MAIN) {
+ start_range = TGL_MAIN_MMIO_START;
+ end_range = TGL_MAIN_MMIO_END;
+ } else if (DISPLAY_VER(i915) >= 13) {
+ start_range = ADLP_PIPE_MMIO_START;
+ end_range = ADLP_PIPE_MMIO_END;
+ } else if (DISPLAY_VER(i915) >= 12) {
+ start_range = TGL_PIPE_MMIO_START(dmc_id);
+ end_range = TGL_PIPE_MMIO_END(dmc_id);
+ } else {
+ drm_warn(&i915->drm, "Unknown mmio range for sanity check\n");
+ return false;
+ }
+
+ for (i = 0; i < mmio_count; i++) {
+ if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
+ return false;
+ }
+
+ return true;
+}
+
static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
size_t rem_size, u8 dmc_id)
@@ -436,6 +474,12 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
return 0;
}
+ if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
+ dmc_header->header_ver, dmc_id)) {
+ drm_err(&i915->drm, "DMC firmware has wrong MMIO addresses\n");
+ return 0;
+ }
+
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
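The new dmc_mmio_addr_sanity_check() above rejects a firmware image whose register offsets fall outside a per-platform MMIO window before any of them are programmed. A minimal, compilable sketch of the same validate-before-use pattern (names and window values are illustrative, not the i915 ones):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-platform MMIO window. */
struct mmio_window { uint32_t start, end; };

/* Reject any firmware-supplied offset outside [start, end]. */
static bool mmio_addrs_in_window(const uint32_t *addrs, size_t count,
				 const struct mmio_window *win)
{
	for (size_t i = 0; i < count; i++) {
		if (addrs[i] < win->start || addrs[i] > win->end)
			return false;
	}
	return true;
}

int main(void)
{
	const struct mmio_window win = { 0x80000, 0x8ffff };
	const uint32_t good[] = { 0x80030, 0x8002c };
	const uint32_t bad[]  = { 0x80030, 0x90000 };	/* second one out of range */

	printf("good: %d\n", mmio_addrs_in_window(good, 2, &win));	/* 1 */
	printf("bad:  %d\n", mmio_addrs_in_window(bad, 2, &win));	/* 0 */
	return 0;
}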
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index d667657e3606..f868db8be02a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -4383,13 +4383,20 @@ intel_dp_update_420(struct intel_dp *intel_dp)
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct edid *edid;
+ bool vrr_capable;
intel_dp_unset_edid(intel_dp);
edid = intel_dp_get_edid(intel_dp);
connector->detect_edid = edid;
+ vrr_capable = intel_vrr_is_capable(&connector->base);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
+ connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
+ drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);
+
intel_dp_update_dfp(intel_dp, edid);
intel_dp_update_420(intel_dp);
@@ -4422,6 +4429,9 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_dp->dfp.ycbcr_444_to_420 = false;
connector->base.ycbcr_420_allowed = false;
+
+ drm_connector_set_vrr_capable_property(&connector->base,
+ false);
}
static int
@@ -4572,14 +4582,9 @@ static int intel_dp_get_modes(struct drm_connector *connector)
int num_modes = 0;
edid = intel_connector->detect_edid;
- if (edid) {
+ if (edid)
num_modes = intel_connector_update_modes(connector, edid);
- if (intel_vrr_is_capable(connector))
- drm_connector_set_vrr_capable_property(connector,
- true);
- }
-
/* Also add fixed mode, which may or may not be present in EDID */
if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
intel_connector->panel.fixed_mode) {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 97cf3cac0105..fb6cf30ee628 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -97,6 +97,14 @@
#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359
+enum intel_dp_aux_backlight_modparam {
+ INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
+ INTEL_DP_AUX_BACKLIGHT_OFF = 0,
+ INTEL_DP_AUX_BACKLIGHT_ON = 1,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
+};
+
/* Intel EDP backlight callbacks */
static bool
intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
@@ -126,6 +134,24 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
return false;
}
+ /*
+ * If we don't have HDR static metadata there is no way to
+ * runtime detect used range for nits based control. For now
+ * do not use Intel proprietary eDP backlight control if we
+ * don't have this data in panel EDID. In case we find panel
+ * which supports only nits based control, but doesn't provide
+ * HDR static metadata we need to start maintaining table of
+ * ranges for such panels.
+ */
+ if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
+ !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
+ BIT(HDMI_STATIC_METADATA_TYPE1))) {
+ drm_info(&i915->drm,
+ "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
+ return false;
+ }
+
panel->backlight.edp.intel.sdr_uses_aux =
tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP;
@@ -413,14 +439,6 @@ static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
.get = intel_dp_aux_vesa_get_backlight,
};
-enum intel_dp_aux_backlight_modparam {
- INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
- INTEL_DP_AUX_BACKLIGHT_OFF = 0,
- INTEL_DP_AUX_BACKLIGHT_ON = 1,
- INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
- INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
-};
-
int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 87f4af3fd523..3e61a8936245 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -1037,7 +1037,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
const struct intel_crtc_state *crtc_state;
struct intel_fbc *fbc = plane->fbc;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index bff8c2d73cdf..6c9e6e7f0afd 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -887,6 +887,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ /* Wa_16011303918:adl-p */
+ if (crtc_state->vrr.enable &&
+ IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, not compatible with HW stepping + VRR\n");
+ return false;
+ }
+
+ if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
+ return false;
+ }
+
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
!HAS_PSR_HW_TRACKING(dev_priv)) {
@@ -900,12 +914,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!crtc_state->enable_psr2_sel_fetch &&
IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
- return false;
+ goto unsupported;
}
if (!psr2_granularity_check(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
- return false;
+ goto unsupported;
}
if (!crtc_state->enable_psr2_sel_fetch &&
@@ -914,25 +928,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
"PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
crtc_hdisplay, crtc_vdisplay,
psr_max_h, psr_max_v);
- return false;
- }
-
- if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
- return false;
- }
-
- /* Wa_16011303918:adl-p */
- if (crtc_state->vrr.enable &&
- IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 not enabled, not compatible with HW stepping + VRR\n");
- return false;
+ goto unsupported;
}
tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
return true;
+
+unsupported:
+ crtc_state->enable_psr2_sel_fetch = false;
+ return false;
}
void intel_psr_compute_config(struct intel_dp *intel_dp,
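The PSR2 rework above converts several hard "return false" exits into "goto unsupported" so that the optimistically set crtc_state->enable_psr2_sel_fetch flag is rolled back on every failure path. A small standalone sketch of that centralized-rollback pattern (hypothetical names):

#include <stdbool.h>

struct state { bool sel_fetch; };

/* Centralized failure path: undo side effects set earlier in the
 * function before reporting failure, so the caller never sees a
 * half-configured state. */
static bool config_valid(struct state *s, bool a_ok, bool b_ok)
{
	s->sel_fetch = true;	/* side effect set optimistically */

	if (!a_ok)
		goto unsupported;
	if (!b_ok)
		goto unsupported;
	return true;

unsupported:
	s->sel_fetch = false;	/* roll back before failing */
	return false;
}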
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d42f437149c9..6ca8929cf6e1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1252,14 +1252,12 @@ static void *reloc_iomap(struct i915_vma *batch,
* Only attempt to pin the batch buffer to ggtt if the current batch
* is not inside ggtt, or the batch buffer is not misplaced.
*/
- if (!i915_is_ggtt(batch->vm)) {
+ if (!i915_is_ggtt(batch->vm) ||
+ !i915_vma_misplaced(batch, 0, 0, PIN_MAPPABLE)) {
vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONBLOCK /* NOWARN */ |
PIN_NOEVICT);
- } else if (i915_vma_is_map_and_fenceable(batch)) {
- __i915_vma_pin(batch);
- vma = batch;
}
if (vma == ERR_PTR(-EDEADLK))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c3ea243d414d..0c5c43852e24 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -70,7 +70,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
* mmap ioctl is disallowed for all discrete platforms,
* and for all platforms with GRAPHICS_VER > 12.
*/
- if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
+ if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
return -EOPNOTSUPP;
if (args->flags & ~(I915_MMAP_WC))
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 82713264b96c..b7c6d4462ec5 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -806,7 +806,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
__intel_engine_reset(engine, stalled_mask & engine->mask);
local_bh_enable();
- intel_uc_reset(&gt->uc, true);
+ intel_uc_reset(&gt->uc, ALL_ENGINES);
intel_ggtt_restore_fences(gt->ggtt);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index bf7079480d47..2488d1197f3e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -438,7 +438,7 @@ int intel_guc_global_policies_update(struct intel_guc *guc);
void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);
void intel_guc_submission_reset_prepare(struct intel_guc *guc);
-void intel_guc_submission_reset(struct intel_guc *guc, bool stalled);
+void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled);
void intel_guc_submission_reset_finish(struct intel_guc *guc);
void intel_guc_submission_cancel_requests(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 1ce7e04aa837..28f9aac0201d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1590,9 +1590,9 @@ __unwind_incomplete_requests(struct intel_context *ce)
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
-static void __guc_reset_context(struct intel_context *ce, bool stalled)
+static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
{
- bool local_stalled;
+ bool guilty;
struct i915_request *rq;
unsigned long flags;
u32 head;
@@ -1620,7 +1620,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
if (!intel_context_is_pinned(ce))
goto next_context;
- local_stalled = false;
+ guilty = false;
rq = intel_context_find_active_request(ce);
if (!rq) {
head = ce->ring->tail;
@@ -1628,14 +1628,14 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
}
if (i915_request_started(rq))
- local_stalled = true;
+ guilty = stalled & ce->engine->mask;
GEM_BUG_ON(i915_active_is_idle(&ce->active));
head = intel_ring_wrap(ce->ring, rq->head);
- __i915_request_reset(rq, local_stalled && stalled);
+ __i915_request_reset(rq, guilty);
out_replay:
- guc_reset_state(ce, head, local_stalled && stalled);
+ guc_reset_state(ce, head, guilty);
next_context:
if (i != number_children)
ce = list_next_entry(ce, parallel.child_link);
@@ -1645,7 +1645,7 @@ next_context:
intel_context_put(parent);
}
-void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
+void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
{
struct intel_context *ce;
unsigned long index;
@@ -4013,7 +4013,7 @@ static void guc_context_replay(struct intel_context *ce)
{
struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
- __guc_reset_context(ce, true);
+ __guc_reset_context(ce, ce->engine->mask);
tasklet_hi_schedule(&sched_engine->tasklet);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index da199aa6989f..8eb34de2f20c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -593,7 +593,7 @@ sanitize:
__uc_sanitize(uc);
}
-void intel_uc_reset(struct intel_uc *uc, bool stalled)
+void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
{
struct intel_guc *guc = &uc->guc;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 866b462821c0..a8f38c2c60e2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -42,7 +42,7 @@ void intel_uc_driver_late_release(struct intel_uc *uc);
void intel_uc_driver_remove(struct intel_uc *uc);
void intel_uc_init_mmio(struct intel_uc *uc);
void intel_uc_reset_prepare(struct intel_uc *uc);
-void intel_uc_reset(struct intel_uc *uc, bool stalled);
+void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled);
void intel_uc_reset_finish(struct intel_uc *uc);
void intel_uc_cancel_requests(struct intel_uc *uc);
void intel_uc_suspend(struct intel_uc *uc);
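Widening the stalled parameter from bool to intel_engine_mask_t lets the GuC reset path decide guilt per request: guilty = stalled & ce->engine->mask marks a context guilty only if one of its own engines actually stalled. A compilable sketch of the bitmask membership test (engine names illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t engine_mask_t;

#define ENGINE_RCS  (1u << 0)
#define ENGINE_VCS0 (1u << 1)
#define ALL_ENGINES (~0u)

/* A context is guilty only if one of the engines it runs on stalled. */
static bool context_guilty(engine_mask_t stalled, engine_mask_t engine)
{
	return (stalled & engine) != 0;
}

int main(void)
{
	/* Full-GT reset: every context is treated as stalled. */
	printf("%d\n", context_guilty(ALL_ENGINES, ENGINE_VCS0));	/* 1 */
	/* Single-engine reset: contexts on other engines replay cleanly. */
	printf("%d\n", context_guilty(ENGINE_RCS, ENGINE_VCS0));	/* 0 */
	return 0;
}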
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3c87d77d2cf6..fe960c204362 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4345,12 +4345,12 @@
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
-#define DISP_POS_Y_MASK REG_GENMASK(31, 0)
+#define DISP_POS_Y_MASK REG_GENMASK(31, 16)
#define DISP_POS_Y(y) REG_FIELD_PREP(DISP_POS_Y_MASK, (y))
#define DISP_POS_X_MASK REG_GENMASK(15, 0)
#define DISP_POS_X(x) REG_FIELD_PREP(DISP_POS_X_MASK, (x))
#define _DSPASIZE 0x70190
-#define DISP_HEIGHT_MASK REG_GENMASK(31, 0)
+#define DISP_HEIGHT_MASK REG_GENMASK(31, 16)
#define DISP_HEIGHT(h) REG_FIELD_PREP(DISP_HEIGHT_MASK, (h))
#define DISP_WIDTH_MASK REG_GENMASK(15, 0)
#define DISP_WIDTH(w) REG_FIELD_PREP(DISP_WIDTH_MASK, (w))
@@ -5152,7 +5152,7 @@
#define _SEL_FETCH_PLANE_BASE_6_A 0x70940
#define _SEL_FETCH_PLANE_BASE_7_A 0x70960
#define _SEL_FETCH_PLANE_BASE_CUR_A 0x70880
-#define _SEL_FETCH_PLANE_BASE_1_B 0x70990
+#define _SEL_FETCH_PLANE_BASE_1_B 0x71890
#define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \
_SEL_FETCH_PLANE_BASE_1_A, \
@@ -5501,6 +5501,22 @@
/* MMIO address range for DMC program (0x80000 - 0x82FFF) */
#define DMC_MMIO_START_RANGE 0x80000
#define DMC_MMIO_END_RANGE 0x8FFFF
+#define DMC_V1_MMIO_START_RANGE 0x80000
+#define TGL_MAIN_MMIO_START 0x8F000
+#define TGL_MAIN_MMIO_END 0x8FFFF
+#define _TGL_PIPEA_MMIO_START 0x92000
+#define _TGL_PIPEA_MMIO_END 0x93FFF
+#define _TGL_PIPEB_MMIO_START 0x96000
+#define _TGL_PIPEB_MMIO_END 0x97FFF
+#define ADLP_PIPE_MMIO_START 0x5F000
+#define ADLP_PIPE_MMIO_END 0x5FFFF
+
+#define TGL_PIPE_MMIO_START(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
+ _TGL_PIPEB_MMIO_START)
+
+#define TGL_PIPE_MMIO_END(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
+ _TGL_PIPEB_MMIO_END)
+
#define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
#define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
#define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038)
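The DISP_POS_Y_MASK and DISP_HEIGHT_MASK fixes above narrow the masks from bits 31:0 to 31:16, so the Y/height field no longer overlaps the X/width field in bits 15:0. A simplified userspace re-implementation of the kernel's GENMASK()/REG_FIELD_PREP() shows the effect (not the kernel macros' exact form):

#include <stdint.h>
#include <stdio.h>

/* Simplified 32-bit versions of the kernel's mask/field-packing macros. */
#define GENMASK32(h, l)		(((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP32(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint32_t pos_y_mask = GENMASK32(31, 16);	/* correct: 0xffff0000 */
	uint32_t pos_x_mask = GENMASK32(15, 0);		/*          0x0000ffff */

	uint32_t reg = FIELD_PREP32(pos_y_mask, 100) | FIELD_PREP32(pos_x_mask, 200);
	printf("reg    = 0x%08x\n", reg);	/* 0x006400c8: both fields intact */

	/* With the old 31:0 mask, Y lands in the low bits on top of X: */
	uint32_t broken = FIELD_PREP32(GENMASK32(31, 0), 100) |
			  FIELD_PREP32(pos_x_mask, 200);
	printf("broken = 0x%08x\n", broken);	/* 0x000000ec: fields collide */
	return 0;
}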
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 94fcdb7bd21d..eeaa8d0d0407 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1605,17 +1605,17 @@ void i915_vma_close(struct i915_vma *vma)
static void __i915_vma_remove_closed(struct i915_vma *vma)
{
- struct intel_gt *gt = vma->vm->gt;
-
- spin_lock_irq(&gt->closed_lock);
list_del_init(&vma->closed_link);
- spin_unlock_irq(&gt->closed_lock);
}
void i915_vma_reopen(struct i915_vma *vma)
{
+ struct intel_gt *gt = vma->vm->gt;
+
+ spin_lock_irq(&gt->closed_lock);
if (i915_vma_is_closed(vma))
__i915_vma_remove_closed(vma);
+ spin_unlock_irq(&gt->closed_lock);
}
void i915_vma_release(struct kref *ref)
@@ -1641,6 +1641,7 @@ static void force_unbind(struct i915_vma *vma)
static void release_references(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
+ struct intel_gt *gt = vma->vm->gt;
GEM_BUG_ON(i915_vma_is_active(vma));
@@ -1650,7 +1651,9 @@ static void release_references(struct i915_vma *vma)
rb_erase(&vma->obj_node, &obj->vma.tree);
spin_unlock(&obj->vma.lock);
+ spin_lock_irq(&gt->closed_lock);
__i915_vma_remove_closed(vma);
+ spin_unlock_irq(&gt->closed_lock);
__i915_vma_put(vma);
}
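The i915_vma change hoists closed_lock out of __i915_vma_remove_closed() into its callers, so the i915_vma_is_closed() test and the list removal in i915_vma_reopen() form one critical section instead of racing. The same pattern in portable pthreads form (illustrative names, a sketch rather than the driver code):

#include <pthread.h>
#include <stdbool.h>

struct node { struct node *prev, *next; bool closed; };

static pthread_mutex_t closed_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold closed_lock: keeps check + unlink atomic. */
static void remove_closed_locked(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;
	n->closed = false;
}

static void node_reopen(struct node *n)
{
	pthread_mutex_lock(&closed_lock);
	if (n->closed)		/* test and removal in one critical section */
		remove_closed_locked(n);
	pthread_mutex_unlock(&closed_lock);
}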
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 87428fb23d9f..a2277a0d6d06 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -222,6 +222,7 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np);
struct imx_hdmi *hdmi;
+ int ret;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
@@ -243,10 +244,15 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
hdmi->bridge = of_drm_find_bridge(np);
if (!hdmi->bridge) {
dev_err(hdmi->dev, "Unable to find bridge\n");
+ dw_hdmi_remove(hdmi->hdmi);
return -ENODEV;
}
- return component_add(&pdev->dev, &dw_hdmi_imx_ops);
+ ret = component_add(&pdev->dev, &dw_hdmi_imx_ops);
+ if (ret)
+ dw_hdmi_remove(hdmi->hdmi);
+
+ return ret;
}
static int dw_hdmi_imx_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index e5078d03020d..fb0e951248f6 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -572,6 +572,8 @@ static int imx_ldb_panel_ddc(struct device *dev,
edidp = of_get_property(child, "edid", &edid_len);
if (edidp) {
channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL);
+ if (!channel->edid)
+ return -ENOMEM;
} else if (!channel->panel) {
/* fallback to display-timings node */
ret = of_get_drm_display_mode(child,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 06cb1a59b9bc..63ba2ad84679 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -75,8 +75,10 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
ret = of_get_drm_display_mode(np, &imxpd->mode,
&imxpd->bus_flags,
OF_USE_NATIVE_MODE);
- if (ret)
+ if (ret) {
+ drm_mode_destroy(connector->dev, mode);
return ret;
+ }
drm_mode_copy(mode, &imxpd->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 83c31b2ad865..ccc4fcf7a630 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1742,7 +1742,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
return ERR_CAST(mmu);
return msm_gem_address_space_create(mmu,
- "gpu", 0x100000000ULL, 0x1ffffffffULL);
+ "gpu", 0x100000000ULL, SZ_4G);
}
static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 89cfd84760d7..8706bcdd1472 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -599,43 +599,91 @@ static const struct of_device_id dt_match[] = {
{}
};
-#ifdef CONFIG_PM
-static int adreno_resume(struct device *dev)
+static int adreno_runtime_resume(struct device *dev)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
return gpu->funcs->pm_resume(gpu);
}
-static int active_submits(struct msm_gpu *gpu)
+static int adreno_runtime_suspend(struct device *dev)
{
- int active_submits;
- mutex_lock(&gpu->active_lock);
- active_submits = gpu->active_submits;
- mutex_unlock(&gpu->active_lock);
- return active_submits;
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+
+ /*
+ * We should be holding a runpm ref, which will prevent
+ * runtime suspend. In the system suspend path, we've
+ * already waited for active jobs to complete.
+ */
+ WARN_ON_ONCE(gpu->active_submits);
+
+ return gpu->funcs->pm_suspend(gpu);
+}
+
+static void suspend_scheduler(struct msm_gpu *gpu)
+{
+ int i;
+
+ /*
+ * Shut down the scheduler before we force suspend, so that
+ * suspend isn't racing with scheduler kthread feeding us
+ * more work.
+ *
+ * Note, we just want to park the thread, and let any jobs
+ * that are already on the hw queue complete normally, as
+ * opposed to the drm_sched_stop() path used for handling
+ * faulting/timed-out jobs. We can't really cancel any jobs
+ * already on the hw queue without racing with the GPU.
+ */
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+ kthread_park(sched->thread);
+ }
}
-static int adreno_suspend(struct device *dev)
+static void resume_scheduler(struct msm_gpu *gpu)
+{
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+ kthread_unpark(sched->thread);
+ }
+}
+
+static int adreno_system_suspend(struct device *dev)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
- int remaining;
+ int remaining, ret;
+
+ suspend_scheduler(gpu);
remaining = wait_event_timeout(gpu->retire_event,
- active_submits(gpu) == 0,
+ gpu->active_submits == 0,
msecs_to_jiffies(1000));
if (remaining == 0) {
dev_err(dev, "Timeout waiting for GPU to suspend\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
- return gpu->funcs->pm_suspend(gpu);
+ ret = pm_runtime_force_suspend(dev);
+out:
+ if (ret)
+ resume_scheduler(gpu);
+
+ return ret;
+}
+
+static int adreno_system_resume(struct device *dev)
+{
+ resume_scheduler(dev_to_gpu(dev));
+ return pm_runtime_force_resume(dev);
}
-#endif
static const struct dev_pm_ops adreno_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
+ RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
};
static struct platform_driver adreno_driver = {
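The adreno rework splits runtime PM from system sleep: system suspend parks the scheduler kthreads, waits for active submits to retire, and only then calls pm_runtime_force_suspend(), while the runtime callbacks merely power the hardware up and down. A skeletal, untested sketch of wiring the same dev_pm_ops with the unconditional SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() macros (which replace the CONFIG_PM-guarded SET_* variants):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* Power down the hardware; no jobs can be in flight here. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Power the hardware back up. */
	return 0;
}

static int foo_system_suspend(struct device *dev)
{
	/* Quiesce work sources first, then reuse the runtime-PM path. */
	return pm_runtime_force_suspend(dev);
}

static int foo_system_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(foo_system_suspend, foo_system_resume)
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};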
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index c515b7cf922c..c61b5b283f08 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -54,87 +54,87 @@ struct dpu_intr_reg {
* When making changes be sure to sync with dpu_hw_intr_reg
*/
static const struct dpu_intr_reg dpu_intr_set[] = {
- {
+ [MDP_SSPP_TOP0_INTR] = {
MDP_SSPP_TOP0_OFF+INTR_CLEAR,
MDP_SSPP_TOP0_OFF+INTR_EN,
MDP_SSPP_TOP0_OFF+INTR_STATUS
},
- {
+ [MDP_SSPP_TOP0_INTR2] = {
MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
MDP_SSPP_TOP0_OFF+INTR2_EN,
MDP_SSPP_TOP0_OFF+INTR2_STATUS
},
- {
+ [MDP_SSPP_TOP0_HIST_INTR] = {
MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
},
- {
+ [MDP_INTF0_INTR] = {
MDP_INTF_0_OFF+INTF_INTR_CLEAR,
MDP_INTF_0_OFF+INTF_INTR_EN,
MDP_INTF_0_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF1_INTR] = {
MDP_INTF_1_OFF+INTF_INTR_CLEAR,
MDP_INTF_1_OFF+INTF_INTR_EN,
MDP_INTF_1_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF2_INTR] = {
MDP_INTF_2_OFF+INTF_INTR_CLEAR,
MDP_INTF_2_OFF+INTF_INTR_EN,
MDP_INTF_2_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF3_INTR] = {
MDP_INTF_3_OFF+INTF_INTR_CLEAR,
MDP_INTF_3_OFF+INTF_INTR_EN,
MDP_INTF_3_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF4_INTR] = {
MDP_INTF_4_OFF+INTF_INTR_CLEAR,
MDP_INTF_4_OFF+INTF_INTR_EN,
MDP_INTF_4_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF5_INTR] = {
MDP_INTF_5_OFF+INTF_INTR_CLEAR,
MDP_INTF_5_OFF+INTF_INTR_EN,
MDP_INTF_5_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_AD4_0_INTR] = {
MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
},
- {
+ [MDP_AD4_1_INTR] = {
MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
},
- {
+ [MDP_INTF0_7xxx_INTR] = {
MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF1_7xxx_INTR] = {
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF2_7xxx_INTR] = {
MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF3_7xxx_INTR] = {
MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF4_7xxx_INTR] = {
MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF5_7xxx_INTR] = {
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
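Converting dpu_intr_set[] to designated initializers keys each register triplet to its enum index, so reordering the enum or leaving gaps can no longer silently misalign the table against dpu_hw_intr_reg. A standalone illustration (register offsets here are made up):

#include <stdio.h>

enum irq_reg { REG_TOP0, REG_INTF0, REG_INTF1, REG_MAX };

struct intr_reg { unsigned clr, en, status; };

/* Entries are keyed by enum value, not by position, so inserting a
 * new enum member cannot shift the existing mappings. */
static const struct intr_reg intr_set[REG_MAX] = {
	[REG_INTF1] = { 0x6b800, 0x6b804, 0x6b808 },
	[REG_TOP0]  = { 0x01010, 0x01014, 0x01018 },
	[REG_INTF0] = { 0x6a800, 0x6a804, 0x6a808 },
};

int main(void)
{
	printf("INTF0 status reg: 0x%x\n", intr_set[REG_INTF0].status);
	return 0;
}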
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 1ee824600995..c478d25f7825 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -98,7 +98,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
__drm_atomic_helper_plane_destroy_state(plane->state);
kfree(to_mdp5_plane_state(plane->state));
+ plane->state = NULL;
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return;
__drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
}
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index 5d2ff6791058..acfe1b31e079 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -176,6 +176,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
va_list va;
new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
+ if (!new_blk)
+ return;
va_start(va, fmt);
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index f1418722c549..26f4b6959c31 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -206,17 +206,6 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
rc = -ETIMEDOUT;
goto end;
}
-
- /* fail safe edid */
- mutex_lock(&connector->dev->mode_config.mutex);
- if (drm_add_modes_noedid(connector, 640, 480))
- drm_set_preferred_mode(connector, 640, 480);
- mutex_unlock(&connector->dev->mode_config.mutex);
- } else {
- /* always add fail-safe mode as backup mode */
- mutex_lock(&connector->dev->mode_config.mutex);
- drm_add_modes_noedid(connector, 640, 480);
- mutex_unlock(&connector->dev->mode_config.mutex);
}
if (panel->aux_cfg_update_done) {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 0c1b7dde377c..9f6af0f0fe00 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -638,7 +638,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
return connector;
fail:
- connector->funcs->destroy(msm_dsi->connector);
+ connector->funcs->destroy(connector);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 02b9ae65a96a..a4f61972667b 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -926,6 +926,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
get_pid_task(aspace->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
+ put_task_struct(task);
} else {
comm = NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index daf9f87477ba..a2141d3d9b1d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -46,8 +46,9 @@ static bool
nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
struct nouveau_backlight *bl)
{
- const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL);
- if (nb < 0 || nb >= 100)
+ const int nb = ida_alloc_max(&bl_ida, 99, GFP_KERNEL);
+
+ if (nb < 0)
return false;
if (nb > 0)
snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
@@ -414,7 +415,7 @@ nouveau_backlight_init(struct drm_connector *connector)
nv_encoder, ops, &props);
if (IS_ERR(bl->dev)) {
if (bl->id >= 0)
- ida_simple_remove(&bl_ida, bl->id);
+ ida_free(&bl_ida, bl->id);
ret = PTR_ERR(bl->dev);
goto fail_alloc;
}
@@ -442,7 +443,7 @@ nouveau_backlight_fini(struct drm_connector *connector)
return;
if (bl->id >= 0)
- ida_simple_remove(&bl_ida, bl->id);
+ ida_free(&bl_ida, bl->id);
backlight_device_unregister(bl->dev);
nv_conn->backlight = NULL;
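The nouveau change migrates from the deprecated ida_simple_get()/ida_simple_remove() pair to ida_alloc_max()/ida_free(). Note that ida_alloc_max()'s bound is inclusive, so the old open-coded nb >= 100 check becomes a max of 99 plus a plain negative-error test. A hedged kernel-style sketch of the new API:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(name_ida);

static int get_name_id(void)
{
	/* Bound is inclusive: ids come from [0, 99].
	 * Returns -ENOMEM/-ENOSPC on failure. */
	return ida_alloc_max(&name_ida, 99, GFP_KERNEL);
}

static void put_name_id(int id)
{
	ida_free(&name_ida, id);
}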
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 992cc285f2fe..2ed528c065fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -123,7 +123,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
mutex_init(&tdev->iommu.mutex);
- if (iommu_present(&platform_bus_type)) {
+ if (device_iommu_mapped(dev)) {
tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
if (!tdev->iommu.domain)
goto error;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index e1772211b0a4..612310d5d481 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -216,6 +216,7 @@ gm20b_pmu = {
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
.initmsg = gm20b_pmu_initmsg,
+ .reset = gf100_pmu_reset,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index 6bf7fc1bd1e3..1a6f9c3af5ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -23,7 +23,7 @@
*/
#include "priv.h"
-static void
+void
gp102_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
index ba1583bb618b..94cfb1791af6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -83,6 +83,7 @@ gp10b_pmu = {
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
.initmsg = gm20b_pmu_initmsg,
+ .reset = gp102_pmu_reset,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index bcaade758ff7..21abf31f4442 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -41,6 +41,7 @@ int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
bool gf100_pmu_enabled(struct nvkm_pmu *);
void gf100_pmu_reset(struct nvkm_pmu *);
+void gp102_pmu_reset(struct nvkm_pmu *pmu);
void gk110_pmu_pgob(struct nvkm_pmu *, bool);
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index a07ef26234e5..6826f4d4826a 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -612,8 +612,10 @@ static int ili9341_dbi_probe(struct spi_device *spi, struct gpio_desc *dc,
int ret;
vcc = devm_regulator_get_optional(dev, "vcc");
- if (IS_ERR(vcc))
+ if (IS_ERR(vcc)) {
dev_err(dev, "get optional vcc failed\n");
+ vcc = NULL;
+ }
dbidev = devm_drm_dev_alloc(dev, &ili9341_dbi_driver,
struct mipi_dbi_dev, drm);
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 46029c5610c8..145047e19394 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
if (ret)
- dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
+ dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
}
static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
@@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
return 0;
}
-static int rpi_touchscreen_enable(struct drm_panel *panel)
+static int rpi_touchscreen_prepare(struct drm_panel *panel)
{
struct rpi_touchscreen *ts = panel_to_ts(panel);
int i;
@@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
msleep(100);
+ return 0;
+}
+
+static int rpi_touchscreen_enable(struct drm_panel *panel)
+{
+ struct rpi_touchscreen *ts = panel_to_ts(panel);
+
/* Turn on the backlight. */
rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
@@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel,
static const struct drm_panel_funcs rpi_touchscreen_funcs = {
.disable = rpi_touchscreen_disable,
.unprepare = rpi_touchscreen_noop,
- .prepare = rpi_touchscreen_noop,
+ .prepare = rpi_touchscreen_prepare,
.enable = rpi_touchscreen_enable,
.get_modes = rpi_touchscreen_get_modes,
};
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index b991ba1bcd51..f63efd8d5e52 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -96,7 +96,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
struct dma_fence *f;
int r = 0;
- dma_resv_for_each_fence(&cursor, resv, shared, f) {
+ dma_resv_for_each_fence(&cursor, resv, !shared, f) {
fence = to_radeon_fence(f);
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index 56ae38389db0..462fae73eae9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -222,13 +222,11 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
/* Set the physical address of the buffer in memory */
paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
- paddr -= PHYS_OFFSET;
DRM_DEBUG_DRIVER("Setting buffer #0 address to %pad\n", &paddr);
regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, paddr);
if (fb->format->num_planes > 1) {
paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 2 : 1);
- paddr -= PHYS_OFFSET;
DRM_DEBUG_DRIVER("Setting buffer #1 address to %pad\n", &paddr);
regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR1_REG,
paddr);
@@ -236,7 +234,6 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
if (fb->format->num_planes > 2) {
paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 1 : 2);
- paddr -= PHYS_OFFSET;
DRM_DEBUG_DRIVER("Setting buffer #2 address to %pad\n", &paddr);
regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR2_REG,
paddr);
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index de3424fed2fc..6cf2621786e6 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -2,6 +2,9 @@
config DRM_VC4
tristate "Broadcom VC4 Graphics"
depends on ARCH_BCM || ARCH_BCM2835 || COMPILE_TEST
+ # Make sure not 'y' when RASPBERRYPI_FIRMWARE is 'm'. This can only
+ # happen when COMPILE_TEST=y, hence the added !RASPBERRYPI_FIRMWARE.
+ depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
depends on DRM
depends on SND && SND_SOC
depends on COMMON_CLK
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 752f921735c6..98308a17e4ed 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -846,7 +846,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
unsigned long phy_clock;
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret) {
DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
return;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 6c58b0fd13fb..98b78ec6b37d 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -38,6 +38,7 @@
#include <drm/drm_scdc_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 31aecc46624b..04c8a378aeed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -46,6 +46,21 @@ vmw_buffer_object(struct ttm_buffer_object *bo)
return container_of(bo, struct vmw_buffer_object, base);
}
+/**
+ * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
+ * @bo: ttm buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+ * a &vmw_buffer_object.
+ *
+ * Returns:
+ * true if the object is of &vmw_buffer_object type, false if not.
+ */
+static bool bo_is_vmw(struct ttm_buffer_object *bo)
+{
+ return bo->destroy == &vmw_bo_bo_free ||
+ bo->destroy == &vmw_gem_destroy;
+}
/**
* vmw_bo_pin_in_placement - Validate a buffer to placement.
@@ -615,8 +630,9 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
- if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
- ret != -EBUSY)) {
+ if (unlikely(ret != 0)) {
+ if (ret == -ERESTARTSYS || ret == -EBUSY)
+ return -EBUSY;
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
(unsigned int) arg->handle);
return ret;
@@ -798,7 +814,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
/* Is @bo embedded in a struct vmw_buffer_object? */
- if (vmw_bo_is_vmw_bo(bo))
+ if (!bo_is_vmw(bo))
return;
/* Kill any cached kernel maps before swapout */
@@ -822,7 +838,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct vmw_buffer_object *vbo;
/* Make sure @bo is embedded in a struct vmw_buffer_object? */
- if (vmw_bo_is_vmw_bo(bo))
+ if (!bo_is_vmw(bo))
return;
vbo = container_of(bo, struct vmw_buffer_object, base);
@@ -843,22 +859,3 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
-
-/**
- * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
- * @bo: buffer object to be checked
- *
- * Uses destroy function associated with the object to determine if this is
- * a &vmw_buffer_object.
- *
- * Returns:
- * true if the object is of &vmw_buffer_object type, false if not.
- */
-bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
-{
- if (bo->destroy == &vmw_bo_bo_free ||
- bo->destroy == &vmw_gem_destroy)
- return true;
-
- return false;
-}
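bo_is_vmw() identifies vmwgfx-owned TTM objects by comparing the object's destroy callback against the two destructors vmwgfx installs, a cheap type tag for C structs that embed a common base. A compilable userspace illustration of the idiom:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct base { void (*destroy)(struct base *); };
struct derived { int payload; struct base base; };

static void derived_destroy(struct base *b) { (void)b; }
static void other_destroy(struct base *b)   { (void)b; }

/* The destroy pointer doubles as a type tag: only objects we created
 * carry our destructor, so it is safe to container_of() them. */
static bool is_derived(const struct base *b)
{
	return b->destroy == derived_destroy;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct derived d = { .payload = 42, .base = { derived_destroy } };
	struct base stranger = { other_destroy };

	if (is_derived(&d.base))
		printf("payload = %d\n",
		       container_of(&d.base, struct derived, base)->payload);
	printf("stranger is ours: %d\n", is_derived(&stranger));	/* 0 */
	return 0;
}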
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index a3bfbb6c3e14..162dfeb1cc5a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -528,7 +528,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
*seqno = atomic_add_return(1, &dev_priv->marker_seq);
} while (*seqno == 0);
- if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {
+ if (!vmw_has_fences(dev_priv)) {
/*
* Don't request hardware to send a fence. The
@@ -675,11 +675,14 @@ int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
*/
bool vmw_cmd_supported(struct vmw_private *vmw)
{
- if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
- SVGA_CAP_CMD_BUFFERS_2)) != 0)
- return true;
+ bool has_cmdbufs =
+ (vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+ SVGA_CAP_CMD_BUFFERS_2)) != 0;
+ if (vmw_is_svga_v3(vmw))
+ return (has_cmdbufs &&
+ (vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);
/*
* We have FIFO cmds
*/
- return vmw->fifo_mem != NULL;
+ return has_cmdbufs || vmw->fifo_mem != NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 26eb5478394a..163c00793eb1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -998,13 +998,10 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
goto out_no_fman;
}
- drm_vma_offset_manager_init(&dev_priv->vma_manager,
- DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE);
ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
dev_priv->drm.dev,
dev_priv->drm.anon_inode->i_mapping,
- &dev_priv->vma_manager,
+ dev_priv->drm.vma_offset_manager,
dev_priv->map_mode == vmw_dma_alloc_coherent,
false);
if (unlikely(ret != 0)) {
@@ -1174,7 +1171,6 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_devcaps_destroy(dev_priv);
vmw_vram_manager_fini(dev_priv);
ttm_device_fini(&dev_priv->bdev);
- drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -1398,7 +1394,7 @@ vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
- &dev_priv->vma_manager);
+ dev_priv->drm.vma_offset_manager);
}
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index ea3ecdda561d..6de0b9ef5c77 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1679,4 +1679,12 @@ static inline void vmw_irq_status_write(struct vmw_private *vmw,
outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
}
+static inline bool vmw_has_fences(struct vmw_private *vmw)
+{
+ if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+ SVGA_CAP_CMD_BUFFERS_2)) != 0)
+ return true;
+ return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
+}
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 8ee34576c7d0..adf17c740656 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -483,7 +483,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd = {0};
struct vmw_fb_par *par = info->par;
struct fb_var_screeninfo *var = &info->var;
struct drm_framebuffer *cur_fb;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 59d6a2dd4c2e..66cc35dc223e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -82,6 +82,22 @@ fman_from_fence(struct vmw_fence_obj *fence)
return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}
+static u32 vmw_fence_goal_read(struct vmw_private *vmw)
+{
+ if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+ return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
+ else
+ return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
+}
+
+static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
+{
+ if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+ vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
+ else
+ vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
+}
+
/*
* Note on fencing subsystem usage of irqs:
* Typically the vmw_fences_update function is called
@@ -392,7 +408,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
if (likely(!fman->seqno_valid))
return false;
- goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
+ goal_seqno = vmw_fence_goal_read(fman->dev_priv);
if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
return false;
@@ -400,9 +416,8 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
list_for_each_entry(fence, &fman->fence_list, head) {
if (!list_empty(&fence->seq_passed_actions)) {
fman->seqno_valid = true;
- vmw_fifo_mem_write(fman->dev_priv,
- SVGA_FIFO_FENCE_GOAL,
- fence->base.seqno);
+ vmw_fence_goal_write(fman->dev_priv,
+ fence->base.seqno);
break;
}
}
@@ -434,13 +449,12 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
if (dma_fence_is_signaled_locked(&fence->base))
return false;
- goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
+ goal_seqno = vmw_fence_goal_read(fman->dev_priv);
if (likely(fman->seqno_valid &&
goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
return false;
- vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
- fence->base.seqno);
+ vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
fman->seqno_valid = true;
return true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index c5191de365ca..fe4732bf2c9d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -32,6 +32,14 @@
#define VMW_FENCE_WRAP (1 << 24)
+static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
+{
+ if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+ return SVGA_IRQFLAG_REG_FENCE_GOAL;
+ else
+ return SVGA_IRQFLAG_FENCE_GOAL;
+}
+
/**
* vmw_thread_fn - Deferred (process context) irq handler
*
@@ -96,7 +104,7 @@ static irqreturn_t vmw_irq_handler(int irq, void *arg)
wake_up_all(&dev_priv->fifo_queue);
if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
- SVGA_IRQFLAG_FENCE_GOAL)) &&
+ vmw_irqflag_fence_goal(dev_priv))) &&
!test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
ret = IRQ_WAKE_THREAD;
@@ -137,8 +145,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return true;
- if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE) &&
- vmw_fifo_idle(dev_priv, seqno))
+ if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
return true;
/**
@@ -160,6 +167,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
unsigned long timeout)
{
struct vmw_fifo_state *fifo_state = dev_priv->fifo;
+ bool fifo_down = false;
uint32_t count = 0;
uint32_t signal_seq;
@@ -176,12 +184,14 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
*/
if (fifo_idle) {
- down_read(&fifo_state->rwsem);
if (dev_priv->cman) {
ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
10*HZ);
if (ret)
goto out_err;
+ } else if (fifo_state) {
+ down_read(&fifo_state->rwsem);
+ fifo_down = true;
}
}
@@ -218,12 +228,12 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
}
}
finish_wait(&dev_priv->fence_queue, &__wait);
- if (ret == 0 && fifo_idle)
+ if (ret == 0 && fifo_idle && fifo_state)
vmw_fence_write(dev_priv, signal_seq);
wake_up_all(&dev_priv->fence_queue);
out_err:
- if (fifo_idle)
+ if (fifo_down)
up_read(&fifo_state->rwsem);
return ret;
@@ -266,13 +276,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+ vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
&dev_priv->goal_queue_waiters);
}
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+ vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
&dev_priv->goal_queue_waiters);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bbd2f4ec08ec..93431e8f6606 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1344,7 +1344,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
mode_cmd,
is_bo_proxy);
-
/*
* vmw_create_bo_proxy() adds a reference that is no longer
* needed
@@ -1385,13 +1384,16 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
ret = vmw_user_lookup_handle(dev_priv, file_priv,
mode_cmd->handles[0],
&surface, &bo);
- if (ret)
+ if (ret) {
+ DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
+ mode_cmd->handles[0], mode_cmd->handles[0]);
goto err_out;
+ }
if (!bo &&
!vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
- DRM_ERROR("Surface size cannot exceed %dx%d",
+ DRM_ERROR("Surface size cannot exceed %dx%d\n",
dev_priv->texture_max_width,
dev_priv->texture_max_height);
goto err_out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 00e8e27e4884..ace7ca150b03 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -683,6 +683,9 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
+ if (base->shareable && res && res->backup)
+ drm_gem_object_put(&res->backup->base.base);
+
*p_base = NULL;
vmw_resource_unreference(&res);
}
@@ -857,6 +860,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
vmw_bo_reference(res->backup);
+ drm_gem_object_get(&res->backup->base.base);
}
tmp = vmw_resource_reference(&srf->res);
@@ -1513,7 +1517,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
&res->backup);
if (ret == 0)
vmw_bo_reference(res->backup);
-
}
if (unlikely(ret != 0)) {
@@ -1561,6 +1564,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
rep->buffer_size = res->backup->base.base.size;
rep->buffer_handle = backup_handle;
+ if (user_srf->prime.base.shareable)
+ drm_gem_object_get(&res->backup->base.base);
} else {
rep->buffer_map_handle = 0;
rep->buffer_size = 0;
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index 666223c6bec4..0a34e0ab4fe6 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -447,8 +447,9 @@ static void ipu_di_config_clock(struct ipu_di *di,
error = rate / (sig->mode.pixelclock / 1000);
- dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n",
- rate, div, (signed)(error - 1000) / 10, error % 10);
+ dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %c%d.%d%%\n",
+ rate, div, error < 1000 ? '-' : '+',
+ abs(error - 1000) / 10, abs(error - 1000) % 10);
/* Allow a 1% error */
if (error < 1010 && error >= 990) {
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 26d269ba947c..85a2142c9384 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -380,7 +380,7 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
* execute:
*
* (a) In the "normal (i.e., not resuming from hibernation)" path,
- * the full barrier in smp_store_mb() guarantees that the store
+ * the full barrier in virt_store_mb() guarantees that the store
* is propagated to all CPUs before the add_channel_work work
* is queued. In turn, add_channel_work is queued before the
* channel's ring buffer is allocated/initialized and the
@@ -392,14 +392,14 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
* recv_int_page before retrieving the channel pointer from the
* array of channels.
*
- * (b) In the "resuming from hibernation" path, the smp_store_mb()
+ * (b) In the "resuming from hibernation" path, the virt_store_mb()
* guarantees that the store is propagated to all CPUs before
* the VMBus connection is marked as ready for the resume event
* (cf. check_ready_for_resume_event()). The interrupt handler
* of the VMBus driver and vmbus_chan_sched() can not run before
* vmbus_bus_resume() has completed execution (cf. resume_noirq).
*/
- smp_store_mb(
+ virt_store_mb(
vmbus_connection.channels[channel->offermsg.child_relid],
channel);
}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 439f99b8b5de..3248b48f37f6 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
+#include <linux/count_zeros.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
@@ -1130,6 +1131,7 @@ static void post_status(struct hv_dynmem_device *dm)
struct dm_status status;
unsigned long now = jiffies;
unsigned long last_post = last_post_time;
+ unsigned long num_pages_avail, num_pages_committed;
if (pressure_report_delay > 0) {
--pressure_report_delay;
@@ -1154,16 +1156,21 @@ static void post_status(struct hv_dynmem_device *dm)
* num_pages_onlined) as committed to the host, otherwise it can try
* asking us to balloon them out.
*/
- status.num_avail = si_mem_available();
- status.num_committed = vm_memory_committed() +
+ num_pages_avail = si_mem_available();
+ num_pages_committed = vm_memory_committed() +
dm->num_pages_ballooned +
(dm->num_pages_added > dm->num_pages_onlined ?
dm->num_pages_added - dm->num_pages_onlined : 0) +
compute_balloon_floor();
- trace_balloon_status(status.num_avail, status.num_committed,
+ trace_balloon_status(num_pages_avail, num_pages_committed,
vm_memory_committed(), dm->num_pages_ballooned,
dm->num_pages_added, dm->num_pages_onlined);
+
+ /* Convert numbers of pages into numbers of HV_HYP_PAGEs. */
+ status.num_avail = num_pages_avail * NR_HV_HYP_PAGES_IN_PAGE;
+ status.num_committed = num_pages_committed * NR_HV_HYP_PAGES_IN_PAGE;
+
/*
* If our transaction ID is no longer current, just don't
* send the status. This can happen if we were interrupted
@@ -1653,6 +1660,38 @@ static void disable_page_reporting(void)
}
}
+static int ballooning_enabled(void)
+{
+ /*
+ * Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE),
+ * since currently it's unclear to us whether an unballoon request can
+ * make sure all page ranges are guest page size aligned.
+ */
+ if (PAGE_SIZE != HV_HYP_PAGE_SIZE) {
+ pr_info("Ballooning disabled because page size is not 4096 bytes\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static int hot_add_enabled(void)
+{
+ /*
+ * Disable hot add on ARM64, because we currently rely on
+ * memory_add_physaddr_to_nid() to get a node id of a hot add range,
+ * however ARM64's memory_add_physaddr_to_nid() always return 0 and
+ * DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for
+ * add_memory().
+ */
+ if (IS_ENABLED(CONFIG_ARM64)) {
+ pr_info("Memory hot add disabled on ARM64\n");
+ return 0;
+ }
+
+ return 1;
+}
+
static int balloon_connect_vsp(struct hv_device *dev)
{
struct dm_version_request version_req;
@@ -1724,8 +1763,8 @@ static int balloon_connect_vsp(struct hv_device *dev)
* currently still requires the bits to be set, so we have to add code
* to fail the host's hot-add and balloon up/down requests, if any.
*/
- cap_msg.caps.cap_bits.balloon = 1;
- cap_msg.caps.cap_bits.hot_add = 1;
+ cap_msg.caps.cap_bits.balloon = ballooning_enabled();
+ cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
/*
* Specify our alignment requirements as it relates
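Because the guest may run with a base page size larger than Hyper-V's fixed 4 KiB HV_HYP_PAGE_SIZE (64 KiB pages on some ARM64 configurations), the post_status() change above scales page counts by NR_HV_HYP_PAGES_IN_PAGE before reporting them to the host. The arithmetic, as a compilable sketch:

#include <stdint.h>
#include <stdio.h>

#define HV_HYP_PAGE_SIZE 4096u

/* Convert guest-page counts to the 4 KiB units the host expects. */
static uint64_t to_hyp_pages(uint64_t guest_pages, uint64_t guest_page_size)
{
	uint64_t per_page = guest_page_size / HV_HYP_PAGE_SIZE; /* NR_HV_HYP_PAGES_IN_PAGE */

	return guest_pages * per_page;
}

int main(void)
{
	/* 1000 pages of 64 KiB each == 16000 hypervisor pages. */
	printf("%llu\n", (unsigned long long)to_hyp_pages(1000, 65536));
	/* With 4 KiB guest pages the conversion is the identity. */
	printf("%llu\n", (unsigned long long)to_hyp_pages(1000, 4096));
	return 0;
}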
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index c1dd21d0d7ef..ae68298c0dca 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -20,6 +20,7 @@
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
+#include <linux/dma-map-ops.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
@@ -218,6 +219,16 @@ bool hv_query_ext_cap(u64 cap_query)
}
EXPORT_SYMBOL_GPL(hv_query_ext_cap);
+void hv_setup_dma_ops(struct device *dev, bool coherent)
+{
+ /*
+ * Hyper-V does not offer a vIOMMU in the guest
+ * VM, so pass 0/NULL for the IOMMU settings
+ */
+ arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
+}
+EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
+
bool hv_is_hibernation_supported(void)
{
return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 71efacb90965..3d215d9dec43 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -439,7 +439,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
u32 priv_read_loc = rbi->priv_read_index;
- u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+ u32 write_loc;
+
+ /*
+ * The Hyper-V host writes the packet data, then uses
+ * store_release() to update the write_index. Use load_acquire()
+ * here to prevent loads of the packet data from being re-ordered
+ * before the read of the write_index and potentially getting
+ * stale data.
+ */
+ write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);
if (write_loc >= priv_read_loc)
return write_loc - priv_read_loc;
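
The load_acquire here pairs with the host's store-release on write_index. A minimal producer/consumer sketch of the pattern, written with the generic smp_* primitives (the ring-buffer code uses the virt_* variants because the peer is the hypervisor rather than another CPU):

	/* Producer (host side, conceptually): */
	memcpy(&ring[widx], pkt, len);			 /* 1. write payload */
	smp_store_release(&rb->write_index, widx + len); /* 2. publish index */

	/* Consumer (guest side): */
	widx = smp_load_acquire(&rb->write_index);
	/* No load of ring[] below widx can be reordered before this read,
	 * so the consumer never sees the new index without the payload. */
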
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 60ee8b329f9e..14de17087864 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -77,8 +77,8 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
/*
* Hyper-V should be notified only once about a panic. If we will be
- * doing hyperv_report_panic_msg() later with kmsg data, don't do
- * the notification here.
+ * doing hv_kmsg_dump() with kmsg data later, don't do the notification
+ * here.
*/
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
&& hyperv_report_reg()) {
@@ -100,8 +100,8 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
/*
* Hyper-V should be notified only once about a panic. If we will be
- * doing hyperv_report_panic_msg() later with kmsg data, don't do
- * the notification here.
+ * doing hv_kmsg_dump() with kmsg data later, don't do the notification
+ * here.
*/
if (hyperv_report_reg())
hyperv_report_panic(regs, val, true);
@@ -921,6 +921,21 @@ static int vmbus_probe(struct device *child_device)
}
/*
+ * vmbus_dma_configure -- Configure DMA coherence for VMbus device
+ */
+static int vmbus_dma_configure(struct device *child_device)
+{
+ /*
+ * On ARM64, propagate the DMA coherence setting from the top level
+ * VMbus ACPI device to the child VMbus device being added here.
+ * On x86/x64 coherence is assumed and these calls have no effect.
+ */
+ hv_setup_dma_ops(child_device,
+ device_get_dma_attr(&hv_acpi_dev->dev) == DEV_DMA_COHERENT);
+ return 0;
+}
+
+/*
* vmbus_remove - Remove a vmbus device
*/
static void vmbus_remove(struct device *child_device)
@@ -1040,6 +1055,7 @@ static struct bus_type hv_bus = {
.remove = vmbus_remove,
.probe = vmbus_probe,
.uevent = vmbus_uevent,
+ .dma_configure = vmbus_dma_configure,
.dev_groups = vmbus_dev_groups,
.drv_groups = vmbus_drv_groups,
.bus_groups = vmbus_bus_groups,
@@ -1546,14 +1562,20 @@ static int vmbus_bus_init(void)
if (ret)
goto err_connect;
+ if (hv_is_isolation_supported())
+ sysctl_record_panic_msg = 0;
+
/*
* Only register if the crash MSRs are available
*/
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
u64 hyperv_crash_ctl;
/*
- * Sysctl registration is not fatal, since by default
- * reporting is enabled.
+ * Panic message recording (sysctl_record_panic_msg)
+ * is enabled by default in non-isolated guests and
+ * disabled by default in isolated guests; it won't
+ * be available in isolated guests if the following
+ * registration fails.
*/
hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
if (!hv_ctl_table_hdr)
@@ -2097,6 +2119,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
child_device_obj->device.parent = &hv_acpi_dev->dev;
child_device_obj->device.release = vmbus_device_release;
+ child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
+ child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
+ dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
+
/*
* Register with the LDM. This will kick off the driver/device
* binding...which will eventually call vmbus_match() and vmbus_probe()
@@ -2122,9 +2148,6 @@ int vmbus_device_register(struct hv_device *child_device_obj)
}
hv_debug_add_dev_dir(child_device_obj);
- child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
- child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
- dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
return 0;
err_kset_unregister:
@@ -2428,6 +2451,21 @@ static int vmbus_acpi_add(struct acpi_device *device)
hv_acpi_dev = device;
+ /*
+ * Older versions of Hyper-V for ARM64 fail to include the _CCA
+ * method on the top level VMbus device in the DSDT. But devices
+ * are hardware coherent in all current Hyper-V use cases, so fix
+ * up the ACPI device to behave as if _CCA is present and indicates
+ * hardware coherence.
+ */
+ ACPI_COMPANION_SET(&device->dev, device);
+ if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) &&
+ device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) {
+ pr_info("No ACPI _CCA found; assuming coherent device I/O\n");
+ device->flags.cca_seen = true;
+ device->flags.coherent_dma = true;
+ }
+
result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
vmbus_walk_resources, NULL);
@@ -2780,10 +2818,15 @@ static void __exit vmbus_exit(void)
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
kmsg_dump_unregister(&hv_kmsg_dumper);
unregister_die_notifier(&hyperv_die_block);
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &hyperv_panic_block);
}
+ /*
+ * The panic notifier is always registered, hence it must
+ * be unconditionally unregistered here as well.
+ */
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &hyperv_panic_block);
+
free_page((unsigned long)hv_panic_page);
unregister_sysctl_table(hv_ctl_table_hdr);
hv_ctl_table_hdr = NULL;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 68a8a27ab3b7..f2b038fa3b84 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -960,7 +960,7 @@ config SENSORS_LTC4261
config SENSORS_LTQ_CPUTEMP
bool "Lantiq cpu temperature sensor driver"
- depends on LANTIQ
+ depends on SOC_XWAY
help
If you say yes here you get support for the temperature
sensor inside your CPU.
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index fb6d14d213a1..c67cd037a93f 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -19,6 +19,7 @@
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/regmap.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/util_macros.h>
@@ -294,11 +295,10 @@ static int adt7470_update_thread(void *p)
adt7470_read_temperatures(data);
mutex_unlock(&data->lock);
- set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop())
break;
- schedule_timeout(msecs_to_jiffies(data->auto_update_interval));
+ schedule_timeout_interruptible(msecs_to_jiffies(data->auto_update_interval));
}
return 0;
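
schedule_timeout_interruptible() sets TASK_INTERRUPTIBLE itself before sleeping, which is what lets the separate set_current_state() call go away. A sketch of the resulting kthread loop idiom (do_work() and interval_ms are placeholders, not driver code):

	static int update_thread(void *p)
	{
		while (!kthread_should_stop()) {
			do_work(p);		/* hypothetical payload */
			schedule_timeout_interruptible(
				msecs_to_jiffies(interval_ms));
		}
		return 0;
	}
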
diff --git a/drivers/hwmon/asus_wmi_sensors.c b/drivers/hwmon/asus_wmi_sensors.c
index 8fdcb62ae52d..9e935e34c998 100644
--- a/drivers/hwmon/asus_wmi_sensors.c
+++ b/drivers/hwmon/asus_wmi_sensors.c
@@ -71,7 +71,7 @@ static const struct dmi_system_id asus_wmi_dmi_table[] = {
DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X399-A"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI EXTREME"),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI HERO"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("CROSSHAIR VI HERO"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI HERO (WI-FI AC)"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VII HERO"),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VII HERO (WI-FI)"),
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 938a8b9ec70d..6830e029995d 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1578,8 +1578,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
temp *= 125;
if (sign)
temp -= 128000;
- } else
- temp = data->temp[nr] * 1000;
+ } else {
+ temp = ((s8)data->temp[nr]) * 1000;
+ }
return sprintf(buf, "%d\n", temp);
}
diff --git a/drivers/hwmon/pmbus/delta-ahe50dc-fan.c b/drivers/hwmon/pmbus/delta-ahe50dc-fan.c
index 40dffd9c4cbf..f546f0c12497 100644
--- a/drivers/hwmon/pmbus/delta-ahe50dc-fan.c
+++ b/drivers/hwmon/pmbus/delta-ahe50dc-fan.c
@@ -14,6 +14,21 @@
#define AHE50DC_PMBUS_READ_TEMP4 0xd0
+static int ahe50dc_fan_write_byte(struct i2c_client *client, int page, u8 value)
+{
+ /*
+ * The CLEAR_FAULTS operation seems to sometimes (unpredictably, perhaps
+ * 5% of the time or so) trigger a problematic phenomenon in which the
+ * fan speeds surge momentarily and at least some (perhaps all?) of the
+ * system's power outputs experience a glitch.
+ *
+ * However, according to Delta it should be OK to simply not send any
+ * CLEAR_FAULTS commands (the device doesn't seem to be capable of
+ * reporting any faults anyway), so just blackhole them unconditionally.
+ */
+ return value == PMBUS_CLEAR_FAULTS ? -EOPNOTSUPP : -ENODATA;
+}
+
static int ahe50dc_fan_read_word_data(struct i2c_client *client, int page, int phase, int reg)
{
/* temp1 in (virtual) page 1 is remapped to mfr-specific temp4 */
@@ -68,6 +83,7 @@ static struct pmbus_driver_info ahe50dc_fan_info = {
PMBUS_HAVE_VIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_FAN34 |
PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_STATUS_FAN34 | PMBUS_PAGE_VIRTUAL,
.func[1] = PMBUS_HAVE_TEMP | PMBUS_PAGE_VIRTUAL,
+ .write_byte = ahe50dc_fan_write_byte,
.read_word_data = ahe50dc_fan_read_word_data,
};
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index b2618b1d529e..d93574d6a1fb 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -2326,6 +2326,9 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
data->has_status_word = true;
}
+ /* Make sure PEC is disabled, will be enabled later if needed */
+ client->flags &= ~I2C_CLIENT_PEC;
+
/* Enable PEC if the controller and bus supports it */
if (!(data->flags & PMBUS_NO_CAPABILITY)) {
ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
index 18fffc5d749b..32bc7736d609 100644
--- a/drivers/hwmon/pmbus/xdpe12284.c
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -124,7 +124,7 @@ static int xdpe122_identify(struct i2c_client *client,
return 0;
}
-static const struct regulator_desc xdpe122_reg_desc[] = {
+static const struct regulator_desc __maybe_unused xdpe122_reg_desc[] = {
PMBUS_REGULATOR("vout", 0),
PMBUS_REGULATOR("vout", 1),
};
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index b86d9df7105d..52c9e7d3f2ae 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -708,10 +708,21 @@ static int tmp401_probe(struct i2c_client *client)
return 0;
}
+static const struct of_device_id __maybe_unused tmp4xx_of_match[] = {
+ { .compatible = "ti,tmp401", },
+ { .compatible = "ti,tmp411", },
+ { .compatible = "ti,tmp431", },
+ { .compatible = "ti,tmp432", },
+ { .compatible = "ti,tmp435", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tmp4xx_of_match);
+
static struct i2c_driver tmp401_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "tmp401",
+ .of_match_table = of_match_ptr(tmp4xx_of_match),
},
.probe_new = tmp401_probe,
.id_table = tmp401_id,
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 27f969b3dc07..e9e2db68b9fb 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -179,6 +179,12 @@ struct imx_i2c_hwdata {
unsigned int ndivs;
unsigned int i2sr_clr_opcode;
unsigned int i2cr_ien_opcode;
+ /*
+ * Errata ERR007805 or e7805:
+ * I2C: When the I2C clock speed is configured for 400 kHz,
+ * the SCL low period violates the I2C spec minimum of 1.3 us.
+ */
+ bool has_err007805;
};
struct imx_i2c_dma {
@@ -240,6 +246,16 @@ static const struct imx_i2c_hwdata imx21_i2c_hwdata = {
};
+static const struct imx_i2c_hwdata imx6_i2c_hwdata = {
+ .devtype = IMX21_I2C,
+ .regshift = IMX_I2C_REGSHIFT,
+ .clk_div = imx_i2c_clk_div,
+ .ndivs = ARRAY_SIZE(imx_i2c_clk_div),
+ .i2sr_clr_opcode = I2SR_CLR_OPCODE_W0C,
+ .i2cr_ien_opcode = I2CR_IEN_OPCODE_1,
+ .has_err007805 = true,
+};
+
static struct imx_i2c_hwdata vf610_i2c_hwdata = {
.devtype = VF610_I2C,
.regshift = VF610_I2C_REGSHIFT,
@@ -266,6 +282,16 @@ MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
static const struct of_device_id i2c_imx_dt_ids[] = {
{ .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
{ .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
+ { .compatible = "fsl,imx6q-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx6sl-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx8mp-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx8mq-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,vf610-i2c", .data = &vf610_i2c_hwdata, },
{ /* sentinel */ }
};
@@ -551,6 +577,13 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
unsigned int div;
int i;
+ if (i2c_imx->hwdata->has_err007805 && i2c_imx->bitrate > 384000) {
+ dev_dbg(&i2c_imx->adapter.dev,
+ "SoC errata ERR007805 or e7805 applies, bus frequency limited from %d Hz to 384000 Hz.\n",
+ i2c_imx->bitrate);
+ i2c_imx->bitrate = 384000;
+ }
+
/* Divider value calculation */
if (i2c_imx->cur_clk == i2c_clk_rate)
return;
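
The 384 kHz cap can be sanity-checked with simple arithmetic:

	/*
	 * Worked numbers (illustrative): at 400 kHz the SCL period is
	 * 1e9 / 400000 = 2500 ns, and I2C fast mode requires t_LOW >= 1300 ns
	 * plus t_HIGH >= 600 ns within it. The erratum makes the hardware's
	 * low phase fall short of 1300 ns at that rate; capping the bus at
	 * 384 kHz (1e9 / 384000 ~= 2604 ns period) restores the low-time
	 * margin without a disproportionate throughput loss.
	 */
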
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index f4820fd3dc13..c16157ee8c52 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -82,6 +82,7 @@
#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
#define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */
+#define ISMT_LOG_ENTRIES 3 /* number of interrupt cause log entries */
/* Hardware Descriptor Constants - Control Field */
#define ISMT_DESC_CWRL 0x01 /* Command/Write Length */
@@ -145,8 +146,8 @@
#define ISMT_SPGT_SPD_MASK 0xc0000000 /* SMBus Speed mask */
#define ISMT_SPGT_SPD_80K 0x00 /* 80 kHz */
#define ISMT_SPGT_SPD_100K (0x1 << 30) /* 100 kHz */
-#define ISMT_SPGT_SPD_400K (0x2 << 30) /* 400 kHz */
-#define ISMT_SPGT_SPD_1M (0x3 << 30) /* 1 MHz */
+#define ISMT_SPGT_SPD_400K (0x2U << 30) /* 400 kHz */
+#define ISMT_SPGT_SPD_1M (0x3U << 30) /* 1 MHz */
/* MSI Control Register (MSICTL) bit definitions */
@@ -175,6 +176,8 @@ struct ismt_priv {
u8 head; /* ring buffer head pointer */
struct completion cmp; /* interrupt completion */
u8 buffer[I2C_SMBUS_BLOCK_MAX + 16]; /* temp R/W data buffer */
+ dma_addr_t log_dma;
+ u32 *log;
};
static const struct pci_device_id ismt_ids[] = {
@@ -411,6 +414,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
memset(desc, 0, sizeof(struct ismt_desc));
desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
+ /* Always clear the log entries */
+ memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32));
+
/* Initialize common control bits */
if (likely(pci_dev_msi_enabled(priv->pci_dev)))
desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
@@ -708,6 +714,8 @@ static void ismt_hw_init(struct ismt_priv *priv)
/* initialize the Master Descriptor Base Address (MDBA) */
writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA);
+ writeq(priv->log_dma, priv->smba + ISMT_GR_SMTICL);
+
/* initialize the Master Control Register (MCTRL) */
writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL);
@@ -795,6 +803,12 @@ static int ismt_dev_init(struct ismt_priv *priv)
priv->head = 0;
init_completion(&priv->cmp);
+ priv->log = dmam_alloc_coherent(&priv->pci_dev->dev,
+ ISMT_LOG_ENTRIES * sizeof(u32),
+ &priv->log_dma, GFP_KERNEL);
+ if (!priv->log)
+ return -ENOMEM;
+
return 0;
}
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index 45fe4a7fe0c0..901f0fb04fee 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -304,7 +304,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)
if (i2c->bus_freq == 0) {
dev_warn(i2c->dev, "clock-frequency 0 not supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_disable_clk;
}
adap = &i2c->adap;
@@ -322,10 +323,15 @@ static int mtk_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(adap);
if (ret < 0)
- return ret;
+ goto err_disable_clk;
dev_info(&pdev->dev, "clock %u kHz\n", i2c->bus_freq / 1000);
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(i2c->clk);
+
return ret;
}
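
The fix follows the usual single-exit unwinding idiom: once clk_prepare_enable() has succeeded, every later failure has to funnel through one label that releases the clock. Condensed shape of the fixed probe (a sketch, not the full driver):

	ret = clk_prepare_enable(i2c->clk);
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = i2c_add_adapter(&i2c->adap);
	if (ret)
		goto err_disable_clk;	/* clock must be released */

	return 0;

err_disable_clk:
	clk_disable_unprepare(i2c->clk);
	return ret;
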
diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
index 7728c8460dc0..9028ffb58cc0 100644
--- a/drivers/i2c/busses/i2c-pasemi-core.c
+++ b/drivers/i2c/busses/i2c-pasemi-core.c
@@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter,
TXFIFO_WR(smbus, msg->buf[msg->len-1] |
(stop ? MTXFIFO_STOP : 0));
+
+ if (stop) {
+ err = pasemi_smb_waitready(smbus);
+ if (err)
+ goto reset_out;
+ }
}
return 0;
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index fc1dcc19f2a1..5b920f0fc7dd 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -843,10 +843,8 @@ static int geni_i2c_probe(struct platform_device *pdev)
/* FIFO is disabled, so we can only use GPI DMA */
gi2c->gpi_mode = true;
ret = setup_gpi_dma(gi2c);
- if (ret) {
- dev_err(dev, "Failed to setup GPI DMA mode:%d ret\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");
dev_dbg(dev, "Using GPI DMA mode for I2C\n");
} else {
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
index 12c90aa0900e..a77cd86fe75e 100644
--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -213,6 +213,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
i2c->adap.dev.parent = dev;
i2c->adap.dev.of_node = pdev->dev.of_node;
+ i2c->adap.dev.fwnode = dev->fwnode;
snprintf(i2c->adap.name, sizeof(i2c->adap.name),
"Cavium ThunderX i2c adapter at %s", dev_name(dev));
i2c_set_adapdata(&i2c->adap, i2c);
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index cf5d049342ea..ab0adaa130da 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -557,7 +557,7 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
.addr = umsg.addr,
.flags = umsg.flags,
.len = umsg.len,
- .buf = compat_ptr(umsg.buf)
+ .buf = (__force __u8 *)compat_ptr(umsg.buf),
};
}
@@ -668,16 +668,21 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
i2c_dev->dev.class = i2c_dev_class;
i2c_dev->dev.parent = &adap->dev;
i2c_dev->dev.release = i2cdev_dev_release;
- dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+
+ res = dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+ if (res)
+ goto err_put_i2c_dev;
res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
- if (res) {
- put_i2c_dev(i2c_dev, false);
- return res;
- }
+ if (res)
+ goto err_put_i2c_dev;
pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr);
return 0;
+
+err_put_i2c_dev:
+ put_i2c_dev(i2c_dev, false);
+ return res;
}
static int i2cdev_detach_adapter(struct device *dev, void *dummy)
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index b7640cfe0020..47551ab73ca8 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -69,7 +69,12 @@ static unsigned int preferred_states_mask;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static unsigned long auto_demotion_disable_flags;
-static bool disable_promotion_to_c1e;
+
+static enum {
+ C1E_PROMOTION_PRESERVE,
+ C1E_PROMOTION_ENABLE,
+ C1E_PROMOTION_DISABLE
+} c1e_promotion = C1E_PROMOTION_PRESERVE;
struct idle_cpu {
struct cpuidle_state *state_table;
@@ -1398,8 +1403,6 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
-static void c1e_promotion_enable(void);
-
/**
* ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
*
@@ -1578,17 +1581,14 @@ static void __init spr_idle_state_table_update(void)
unsigned long long msr;
/* Check if user prefers C1E over C1. */
- if (preferred_states_mask & BIT(2)) {
- if (preferred_states_mask & BIT(1))
- /* Both can't be enabled, stick to the defaults. */
- return;
-
+ if ((preferred_states_mask & BIT(2)) &&
+ !(preferred_states_mask & BIT(1))) {
+ /* Disable C1 and enable C1E. */
spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE;
spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE;
/* Enable C1E using the "C1E promotion" bit. */
- c1e_promotion_enable();
- disable_promotion_to_c1e = false;
+ c1e_promotion = C1E_PROMOTION_ENABLE;
}
/*
@@ -1754,7 +1754,9 @@ static int intel_idle_cpu_init(unsigned int cpu)
if (auto_demotion_disable_flags)
auto_demotion_disable();
- if (disable_promotion_to_c1e)
+ if (c1e_promotion == C1E_PROMOTION_ENABLE)
+ c1e_promotion_enable();
+ else if (c1e_promotion == C1E_PROMOTION_DISABLE)
c1e_promotion_disable();
return 0;
@@ -1833,7 +1835,8 @@ static int __init intel_idle_init(void)
if (icpu) {
cpuidle_state_table = icpu->state_table;
auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
- disable_promotion_to_c1e = icpu->disable_promotion_to_c1e;
+ if (icpu->disable_promotion_to_c1e)
+ c1e_promotion = C1E_PROMOTION_DISABLE;
if (icpu->use_acpi || force_use_acpi)
intel_idle_acpi_cst_extract();
} else if (!intel_idle_acpi_cst_extract()) {
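
Replacing the bool with a tri-state makes "leave the MSR alone" representable: the old flag could only request disabling, while the SPR update needs to force-enable promotion and every other platform should preserve the firmware setting. An equivalent three-way dispatch, as a sketch:

	switch (c1e_promotion) {
	case C1E_PROMOTION_ENABLE:
		c1e_promotion_enable();		/* SPR: user prefers C1E */
		break;
	case C1E_PROMOTION_DISABLE:
		c1e_promotion_disable();	/* per-CPU table says so */
		break;
	case C1E_PROMOTION_PRESERVE:
		break;				/* keep firmware default */
	}
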
diff --git a/drivers/iio/adc/ad7280a.c b/drivers/iio/adc/ad7280a.c
index ef9d27759961..ec9acbf12b9a 100644
--- a/drivers/iio/adc/ad7280a.c
+++ b/drivers/iio/adc/ad7280a.c
@@ -745,7 +745,7 @@ static int ad7280a_write_thresh(struct iio_dev *indio_dev,
case IIO_EV_DIR_RISING:
addr = AD7280A_CELL_OVERVOLTAGE_REG;
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr,
- 1, val);
+ 1, value);
if (ret)
break;
st->cell_threshhigh = value;
@@ -753,7 +753,7 @@ static int ad7280a_write_thresh(struct iio_dev *indio_dev,
case IIO_EV_DIR_FALLING:
addr = AD7280A_CELL_UNDERVOLTAGE_REG;
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr,
- 1, val);
+ 1, value);
if (ret)
break;
st->cell_threshlow = value;
@@ -770,18 +770,18 @@ static int ad7280a_write_thresh(struct iio_dev *indio_dev,
case IIO_EV_DIR_RISING:
addr = AD7280A_AUX_ADC_OVERVOLTAGE_REG;
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr,
- 1, val);
+ 1, value);
if (ret)
break;
- st->aux_threshhigh = val;
+ st->aux_threshhigh = value;
break;
case IIO_EV_DIR_FALLING:
addr = AD7280A_AUX_ADC_UNDERVOLTAGE_REG;
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr,
- 1, val);
+ 1, value);
if (ret)
break;
- st->aux_threshlow = val;
+ st->aux_threshlow = value;
break;
default:
ret = -EINVAL;
diff --git a/drivers/iio/chemical/scd4x.c b/drivers/iio/chemical/scd4x.c
index 20d4e7584e92..37143b5526ee 100644
--- a/drivers/iio/chemical/scd4x.c
+++ b/drivers/iio/chemical/scd4x.c
@@ -471,12 +471,15 @@ static ssize_t calibration_forced_value_store(struct device *dev,
ret = scd4x_write_and_fetch(state, CMD_FRC, arg, &val, sizeof(val));
mutex_unlock(&state->lock);
+ if (ret)
+ return ret;
+
if (val == 0xff) {
dev_err(dev, "forced calibration has failed");
return -EINVAL;
}
- return ret ?: len;
+ return len;
}
static IIO_DEVICE_ATTR_RW(calibration_auto_enable, 0);
diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
index 97f13c0b9631..d5ea1a1be122 100644
--- a/drivers/iio/dac/ad3552r.c
+++ b/drivers/iio/dac/ad3552r.c
@@ -656,7 +656,7 @@ static int ad3552r_reset(struct ad3552r_desc *dac)
{
struct reg_addr_pool addr;
int ret;
- u16 val;
+ int val;
dac->gpio_reset = devm_gpiod_get_optional(&dac->spi->dev, "reset",
GPIOD_OUT_LOW);
@@ -809,10 +809,10 @@ static int ad3552r_configure_custom_gain(struct ad3552r_desc *dac,
gain_child = fwnode_get_named_child_node(child,
"custom-output-range-config");
- if (IS_ERR(gain_child)) {
+ if (!gain_child) {
dev_err(dev,
"mandatory custom-output-range-config property missing\n");
- return PTR_ERR(gain_child);
+ return -EINVAL;
}
dac->ch_data[ch].range_override = 1;
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 14cfabacbea5..fdf824041497 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -178,7 +178,7 @@ static int ad5446_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- *val = st->cached_val;
+ *val = st->cached_val >> chan->scan_type.shift;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = st->vref_mv;
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index a424b7220b61..4434c1b2a322 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -522,7 +522,7 @@ static int ad5592r_alloc_channels(struct iio_dev *iio_dev)
if (!ret)
st->channel_modes[reg] = tmp;
- fwnode_property_read_u32(child, "adi,off-state", &tmp);
+ ret = fwnode_property_read_u32(child, "adi,off-state", &tmp);
if (!ret)
st->channel_offstate[reg] = tmp;
}
diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c
index e41861d29767..2f9c384885f4 100644
--- a/drivers/iio/dac/ltc2688.c
+++ b/drivers/iio/dac/ltc2688.c
@@ -298,7 +298,7 @@ static int ltc2688_read_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
- *val = 16;
+ *val2 = 16;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_CALIBBIAS:
ret = regmap_read(st->regmap,
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index 4a3b8d875518..0b775f943db3 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
enum chip_id {
@@ -311,6 +312,7 @@ static int dac5571_probe(struct i2c_client *client,
const struct dac5571_spec *spec;
struct dac5571_data *data;
struct iio_dev *indio_dev;
+ enum chip_id chip_id;
int ret, i;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
@@ -326,7 +328,13 @@ static int dac5571_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = dac5571_channels;
- spec = &dac5571_spec[id->driver_data];
+ if (dev_fwnode(dev))
+ chip_id = (uintptr_t)device_get_match_data(dev);
+ else
+ chip_id = id->driver_data;
+
+ spec = &dac5571_spec[chip_id];
+
indio_dev->num_channels = spec->num_channels;
data->spec = spec;
@@ -385,15 +393,15 @@ static int dac5571_remove(struct i2c_client *i2c)
}
static const struct of_device_id dac5571_of_id[] = {
- {.compatible = "ti,dac5571"},
- {.compatible = "ti,dac6571"},
- {.compatible = "ti,dac7571"},
- {.compatible = "ti,dac5574"},
- {.compatible = "ti,dac6574"},
- {.compatible = "ti,dac7574"},
- {.compatible = "ti,dac5573"},
- {.compatible = "ti,dac6573"},
- {.compatible = "ti,dac7573"},
+ {.compatible = "ti,dac5571", .data = (void *)single_8bit},
+ {.compatible = "ti,dac6571", .data = (void *)single_10bit},
+ {.compatible = "ti,dac7571", .data = (void *)single_12bit},
+ {.compatible = "ti,dac5574", .data = (void *)quad_8bit},
+ {.compatible = "ti,dac6574", .data = (void *)quad_10bit},
+ {.compatible = "ti,dac7574", .data = (void *)quad_12bit},
+ {.compatible = "ti,dac5573", .data = (void *)quad_8bit},
+ {.compatible = "ti,dac6573", .data = (void *)quad_10bit},
+ {.compatible = "ti,dac7573", .data = (void *)quad_12bit},
{}
};
MODULE_DEVICE_TABLE(of, dac5571_of_id);
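
Adding .data to the OF table is what makes device_get_match_data() usable in probe: when the device was instantiated from DT (or ACPI) the chip variant comes from the matched entry, otherwise from the legacy I2C id table. A generic sketch of the idiom (all names hypothetical):

	enum my_chip { CHIP_A, CHIP_B };

	static const struct of_device_id my_of_match[] = {
		{ .compatible = "vendor,chip-a", .data = (void *)CHIP_A },
		{ .compatible = "vendor,chip-b", .data = (void *)CHIP_B },
		{ }
	};

	static int my_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
	{
		enum my_chip chip;

		if (dev_fwnode(&client->dev))	/* firmware-described device */
			chip = (uintptr_t)device_get_match_data(&client->dev);
		else				/* legacy i2c_board_info path */
			chip = id->driver_data;

		return 0;	/* ...configure per-variant data here */
	}
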
diff --git a/drivers/iio/filter/Kconfig b/drivers/iio/filter/Kconfig
index 3ae35817ad82..a85b345ea14e 100644
--- a/drivers/iio/filter/Kconfig
+++ b/drivers/iio/filter/Kconfig
@@ -8,6 +8,7 @@ menu "Filters"
config ADMV8818
tristate "Analog Devices ADMV8818 High-Pass and Low-Pass Filter"
depends on SPI && COMMON_CLK && 64BIT
+ select REGMAP_SPI
help
Say yes here to build support for Analog Devices ADMV8818
2 GHz to 18 GHz, Digitally Tunable, High-Pass and Low-Pass Filter.
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index 824b5124a5f5..01336105792e 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -730,7 +730,7 @@ static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
ret = regmap_write(data->regmap, BMI160_REG_CMD, BMI160_CMD_SOFTRESET);
if (ret)
- return ret;
+ goto disable_regulator;
usleep_range(BMI160_SOFTRESET_USLEEP, BMI160_SOFTRESET_USLEEP + 1);
@@ -741,29 +741,37 @@ static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
if (use_spi) {
ret = regmap_read(data->regmap, BMI160_REG_DUMMY, &val);
if (ret)
- return ret;
+ goto disable_regulator;
}
ret = regmap_read(data->regmap, BMI160_REG_CHIP_ID, &val);
if (ret) {
dev_err(dev, "Error reading chip id\n");
- return ret;
+ goto disable_regulator;
}
if (val != BMI160_CHIP_ID_VAL) {
dev_err(dev, "Wrong chip id, got %x expected %x\n",
val, BMI160_CHIP_ID_VAL);
- return -ENODEV;
+ ret = -ENODEV;
+ goto disable_regulator;
}
ret = bmi160_set_mode(data, BMI160_ACCEL, true);
if (ret)
- return ret;
+ goto disable_regulator;
ret = bmi160_set_mode(data, BMI160_GYRO, true);
if (ret)
- return ret;
+ goto disable_accel;
return 0;
+
+disable_accel:
+ bmi160_set_mode(data, BMI160_ACCEL, false);
+
+disable_regulator:
+ regulator_bulk_disable(ARRAY_SIZE(data->supplies), data->supplies);
+ return ret;
}
static int bmi160_data_rdy_trigger_set_state(struct iio_trigger *trig,
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
index 33d9afb1ba91..d4a692b838d0 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
@@ -18,12 +18,15 @@ static int inv_icm42600_i2c_bus_setup(struct inv_icm42600_state *st)
unsigned int mask, val;
int ret;
- /* setup interface registers */
- ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6,
- INV_ICM42600_INTF_CONFIG6_MASK,
- INV_ICM42600_INTF_CONFIG6_I3C_EN);
- if (ret)
- return ret;
+ /*
+ * setup interface registers
+ * This register write to REG_INTF_CONFIG6 enables a spike filter
+ * that impacts the line and can prevent the I2C ACK from being
+ * seen by the controller, so we don't test the return value.
+ */
+ regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6,
+ INV_ICM42600_INTF_CONFIG6_MASK,
+ INV_ICM42600_INTF_CONFIG6_I3C_EN);
ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG4,
INV_ICM42600_INTF_CONFIG4_I3C_BUS_ONLY, 0);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 088f748b683e..2432e697150c 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -416,6 +416,7 @@ static int ak8975_power_on(const struct ak8975_data *data)
if (ret) {
dev_warn(&data->client->dev,
"Failed to enable specified Vid supply\n");
+ regulator_disable(data->vdd);
return ret;
}
diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c
index 0d9bbbb50cb4..70c37f664f6d 100644
--- a/drivers/iio/proximity/sx9324.c
+++ b/drivers/iio/proximity/sx9324.c
@@ -70,13 +70,17 @@
#define SX9324_REG_AFE_PH2 0x2a
#define SX9324_REG_AFE_PH3 0x2b
#define SX9324_REG_AFE_CTRL8 0x2c
-#define SX9324_REG_AFE_CTRL8_RESFILTN_4KOHM 0x02
+#define SX9324_REG_AFE_CTRL8_RESERVED 0x10
+#define SX9324_REG_AFE_CTRL8_RESFILTIN_4KOHM 0x02
#define SX9324_REG_AFE_CTRL9 0x2d
#define SX9324_REG_AFE_CTRL9_AGAIN_1 0x08
#define SX9324_REG_PROX_CTRL0 0x30
#define SX9324_REG_PROX_CTRL0_GAIN_MASK GENMASK(5, 3)
-#define SX9324_REG_PROX_CTRL0_GAIN_1 0x80
+#define SX9324_REG_PROX_CTRL0_GAIN_SHIFT 3
+#define SX9324_REG_PROX_CTRL0_GAIN_RSVD 0x0
+#define SX9324_REG_PROX_CTRL0_GAIN_1 0x1
+#define SX9324_REG_PROX_CTRL0_GAIN_8 0x4
#define SX9324_REG_PROX_CTRL0_RAWFILT_MASK GENMASK(2, 0)
#define SX9324_REG_PROX_CTRL0_RAWFILT_1P50 0x01
#define SX9324_REG_PROX_CTRL1 0x31
@@ -379,7 +383,14 @@ static int sx9324_read_gain(struct sx_common_data *data,
if (ret)
return ret;
- *val = 1 << FIELD_GET(SX9324_REG_PROX_CTRL0_GAIN_MASK, regval);
+ regval = FIELD_GET(SX9324_REG_PROX_CTRL0_GAIN_MASK, regval);
+ if (regval)
+ regval--;
+ else if (regval == SX9324_REG_PROX_CTRL0_GAIN_RSVD ||
+ regval > SX9324_REG_PROX_CTRL0_GAIN_8)
+ return -EINVAL;
+
+ *val = 1 << regval;
return IIO_VAL_INT;
}
@@ -725,8 +736,12 @@ static int sx9324_write_gain(struct sx_common_data *data,
unsigned int gain, reg;
int ret;
- gain = ilog2(val);
reg = SX9324_REG_PROX_CTRL0 + chan->channel / 2;
+
+ gain = ilog2(val) + 1;
+ if (val <= 0 || gain > SX9324_REG_PROX_CTRL0_GAIN_8)
+ return -EINVAL;
+
gain = FIELD_PREP(SX9324_REG_PROX_CTRL0_GAIN_MASK, gain);
mutex_lock(&data->mutex);
@@ -781,12 +796,15 @@ static const struct sx_common_reg_default sx9324_default_regs[] = {
{ SX9324_REG_AFE_PH2, 0x1a },
{ SX9324_REG_AFE_PH3, 0x16 },
- { SX9324_REG_AFE_CTRL8, SX9324_REG_AFE_CTRL8_RESFILTN_4KOHM },
+ { SX9324_REG_AFE_CTRL8, SX9324_REG_AFE_CTRL8_RESERVED |
+ SX9324_REG_AFE_CTRL8_RESFILTIN_4KOHM },
{ SX9324_REG_AFE_CTRL9, SX9324_REG_AFE_CTRL9_AGAIN_1 },
- { SX9324_REG_PROX_CTRL0, SX9324_REG_PROX_CTRL0_GAIN_1 |
+ { SX9324_REG_PROX_CTRL0,
+ SX9324_REG_PROX_CTRL0_GAIN_1 << SX9324_REG_PROX_CTRL0_GAIN_SHIFT |
SX9324_REG_PROX_CTRL0_RAWFILT_1P50 },
- { SX9324_REG_PROX_CTRL1, SX9324_REG_PROX_CTRL0_GAIN_1 |
+ { SX9324_REG_PROX_CTRL1,
+ SX9324_REG_PROX_CTRL0_GAIN_1 << SX9324_REG_PROX_CTRL0_GAIN_SHIFT |
SX9324_REG_PROX_CTRL0_RAWFILT_1P50 },
{ SX9324_REG_PROX_CTRL2, SX9324_REG_PROX_CTRL2_AVGNEG_THRESH_16K },
{ SX9324_REG_PROX_CTRL3, SX9324_REG_PROX_CTRL3_AVGDEB_2SAMPLES |
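
The gain fix hinges on the register encoding: field value 0 is reserved, and values 1-4 encode gains of 1, 2, 4 and 8, i.e. gain = 1 << (field - 1) on read and field = ilog2(gain) + 1 on write. Worked both ways:

	/*
	 * field 1 -> gain 1 << 0 = 1	field 3 -> gain 1 << 2 = 4
	 * field 2 -> gain 1 << 1 = 2	field 4 -> gain 1 << 3 = 8
	 * write: gain 8  -> ilog2(8) + 1 = 4 = SX9324_REG_PROX_CTRL0_GAIN_8
	 *        gain 16 -> ilog2(16) + 1 = 5 > GAIN_8, rejected (-EINVAL)
	 */
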
diff --git a/drivers/iio/proximity/sx_common.c b/drivers/iio/proximity/sx_common.c
index a7c07316a0a9..8ad814d96b7e 100644
--- a/drivers/iio/proximity/sx_common.c
+++ b/drivers/iio/proximity/sx_common.c
@@ -521,6 +521,7 @@ int sx_common_probe(struct i2c_client *client,
return dev_err_probe(dev, ret, "error reading WHOAMI\n");
ACPI_COMPANION_SET(&indio_dev->dev, ACPI_COMPANION(dev));
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = data->chip_info->iio_channels;
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 35f0d5e7533d..1c107d6d03b9 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -2824,6 +2824,7 @@ static int cm_dreq_handler(struct cm_work *work)
switch (cm_id_priv->id.state) {
case IB_CM_REP_SENT:
case IB_CM_DREQ_SENT:
+ case IB_CM_MRA_REP_RCVD:
ib_cancel_mad(cm_id_priv->msg);
break;
case IB_CM_ESTABLISHED:
@@ -2831,8 +2832,6 @@ static int cm_dreq_handler(struct cm_work *work)
cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
ib_cancel_mad(cm_id_priv->msg);
break;
- case IB_CM_MRA_REP_RCVD:
- break;
case IB_CM_TIMEWAIT:
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
[CM_DREQ_COUNTER]);
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 876cc78a22cc..7333646021bb 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -80,6 +80,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
unsigned long flags;
struct list_head del_list;
+ /* Prevent freeing of mm until we are completely finished. */
+ mmgrab(handler->mn.mm);
+
/* Unregister first so we don't get any more notifications. */
mmu_notifier_unregister(&handler->mn, handler->mn.mm);
@@ -102,6 +105,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
do_remove(handler, &del_list);
+ /* Now the mm may be freed. */
+ mmdrop(handler->mn.mm);
+
kfree(handler);
}
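
mmgrab()/mmdrop() pin the mm_struct object itself, as opposed to mmget()/mmput() which pin the whole address space, so handler->mn.mm remains a valid pointer for the entire teardown even if the owning process exits in the middle. The general shape:

	mmgrab(mm);			/* mm_struct cannot be freed now */
	mmu_notifier_unregister(&handler->mn, mm);
	/* ... drain work that may still dereference mm ... */
	mmdrop(mm);			/* drop the pin; mm may now go away */
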
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index dedb3b7edd8d..638bf4a1ed94 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -2308,10 +2308,8 @@ err:
return NULL;
}
-static void irdma_cm_node_free_cb(struct rcu_head *rcu_head)
+static void irdma_destroy_connection(struct irdma_cm_node *cm_node)
{
- struct irdma_cm_node *cm_node =
- container_of(rcu_head, struct irdma_cm_node, rcu_head);
struct irdma_cm_core *cm_core = cm_node->cm_core;
struct irdma_qp *iwqp;
struct irdma_cm_info nfo;
@@ -2359,7 +2357,6 @@ static void irdma_cm_node_free_cb(struct rcu_head *rcu_head)
}
cm_core->cm_free_ah(cm_node);
- kfree(cm_node);
}
/**
@@ -2387,8 +2384,9 @@ void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node)
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
- /* wait for all list walkers to exit their grace period */
- call_rcu(&cm_node->rcu_head, irdma_cm_node_free_cb);
+ irdma_destroy_connection(cm_node);
+
+ kfree_rcu(cm_node, rcu_head);
}
/**
@@ -3246,15 +3244,10 @@ int irdma_setup_cm_core(struct irdma_device *iwdev, u8 rdma_ver)
*/
void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core)
{
- unsigned long flags;
-
if (!cm_core)
return;
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- if (timer_pending(&cm_core->tcp_timer))
- del_timer_sync(&cm_core->tcp_timer);
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ del_timer_sync(&cm_core->tcp_timer);
destroy_workqueue(cm_core->event_wq);
cm_core->dev->ws_reset(&cm_core->iwdev->vsi);
@@ -3467,12 +3460,6 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
}
cm_id = iwqp->cm_id;
- /* make sure we havent already closed this connection */
- if (!cm_id) {
- spin_unlock_irqrestore(&iwqp->lock, flags);
- return;
- }
-
original_hw_tcp_state = iwqp->hw_tcp_state;
original_ibqp_state = iwqp->ibqp_state;
last_ae = iwqp->last_aeq;
@@ -3494,11 +3481,11 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
disconn_status = -ECONNRESET;
}
- if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
- original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
- last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
- last_ae == IRDMA_AE_BAD_CLOSE ||
- last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
+ if (original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
+ original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
+ last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
+ last_ae == IRDMA_AE_BAD_CLOSE ||
+ last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset || !cm_id) {
issue_close = 1;
iwqp->cm_id = NULL;
qp->term_flags = 0;
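
The cm_node change trades an asynchronous call_rcu() callback for synchronous teardown plus kfree_rcu(): the destroy work, which is unsuited to RCU callback (softirq) context, runs immediately in the caller, and only the final kfree() is deferred past the grace period. Generic shape (struct obj and teardown() are hypothetical):

	struct obj {
		struct rcu_head rcu;
		/* ... */
	};

	static void obj_release(struct obj *o)
	{
		teardown(o);		/* may sleep; runs in caller context */
		kfree_rcu(o, rcu);	/* memory freed after readers finish */
	}
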
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 346c2c5dabdf..81760415d66c 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -258,18 +258,16 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
u32 local_ipaddr[4] = {};
bool ipv4 = true;
- real_dev = rdma_vlan_dev_real_dev(netdev);
- if (!real_dev)
- real_dev = netdev;
-
- ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
- if (!ibdev)
- return NOTIFY_DONE;
-
- iwdev = to_iwdev(ibdev);
-
switch (event) {
case NETEVENT_NEIGH_UPDATE:
+ real_dev = rdma_vlan_dev_real_dev(netdev);
+ if (!real_dev)
+ real_dev = netdev;
+ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ iwdev = to_iwdev(ibdev);
p = (__be32 *)neigh->primary_key;
if (neigh->tbl->family == AF_INET6) {
ipv4 = false;
@@ -290,13 +288,12 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
irdma_manage_arp_cache(iwdev->rf, neigh->ha,
local_ipaddr, ipv4,
IRDMA_ARP_DELETE);
+ ib_device_put(ibdev);
break;
default:
break;
}
- ib_device_put(ibdev);
-
return NOTIFY_DONE;
}
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 46f475394af5..52f3e88f8569 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -1618,13 +1618,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
if (dont_wait) {
- if (iwqp->cm_id && iwqp->hw_tcp_state) {
+ if (iwqp->hw_tcp_state) {
spin_lock_irqsave(&iwqp->lock, flags);
iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
iwqp->last_aeq = IRDMA_AE_RESET_SENT;
spin_unlock_irqrestore(&iwqp->lock, flags);
- irdma_cm_disconn(iwqp);
}
+ irdma_cm_disconn(iwqp);
} else {
int close_timer_started;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 956f8e875daa..32ef67e9a6a7 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -574,8 +574,10 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
spin_lock_irq(&ent->lock);
if (ent->disabled)
goto out;
- if (need_delay)
+ if (need_delay) {
queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
+ goto out;
+ }
remove_cache_mr_locked(ent);
queue_adjust_cache_locked(ent);
}
@@ -625,6 +627,7 @@ static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct mlx5_cache_ent *ent = mr->cache_ent;
+ WRITE_ONCE(dev->cache.last_add, jiffies);
spin_lock_irq(&ent->lock);
list_add_tail(&mr->list, &ent->head);
ent->available_mrs++;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index ae50b56e8913..8ef112f883a7 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -3190,7 +3190,11 @@ serr_no_r_lock:
spin_lock_irqsave(&sqp->s_lock, flags);
rvt_send_complete(sqp, wqe, send_status);
if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+ int lastwqe;
+
+ spin_lock(&sqp->r_lock);
+ lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+ spin_unlock(&sqp->r_lock);
sqp->s_flags &= ~RVT_S_BUSY;
spin_unlock_irqrestore(&sqp->s_lock, flags);
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index ae8f11cb704a..873a9b10307c 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -38,13 +38,13 @@ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
}
/**
- * rxe_mcast_delete - delete multicast address from rxe device
+ * rxe_mcast_del - delete multicast address from rxe device
* @rxe: rxe device object
* @mgid: multicast address as a gid
*
* Returns 0 on success else an error
*/
-static int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
+static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
{
unsigned char ll_addr[ETH_ALEN];
@@ -143,11 +143,10 @@ static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe,
struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
{
struct rxe_mcg *mcg;
- unsigned long flags;
- spin_lock_irqsave(&rxe->mcg_lock, flags);
+ spin_lock_bh(&rxe->mcg_lock);
mcg = __rxe_lookup_mcg(rxe, mgid);
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ spin_unlock_bh(&rxe->mcg_lock);
return mcg;
}
@@ -159,17 +158,10 @@ struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
* @mcg: new mcg object
*
* Context: caller should hold rxe->mcg lock
- * Returns: 0 on success else an error
*/
-static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
- struct rxe_mcg *mcg)
+static void __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
+ struct rxe_mcg *mcg)
{
- int err;
-
- err = rxe_mcast_add(rxe, mgid);
- if (unlikely(err))
- return err;
-
kref_init(&mcg->ref_cnt);
memcpy(&mcg->mgid, mgid, sizeof(mcg->mgid));
INIT_LIST_HEAD(&mcg->qp_list);
@@ -184,8 +176,6 @@ static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
*/
kref_get(&mcg->ref_cnt);
__rxe_insert_mcg(mcg);
-
- return 0;
}
/**
@@ -198,7 +188,6 @@ static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
{
struct rxe_mcg *mcg, *tmp;
- unsigned long flags;
int err;
if (rxe->attr.max_mcast_grp == 0)
@@ -209,36 +198,38 @@ static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
if (mcg)
return mcg;
+ /* check to see if we have reached the limit */
+ if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) {
+ err = -ENOMEM;
+ goto err_dec;
+ }
+
/* speculative alloc of new mcg */
mcg = kzalloc(sizeof(*mcg), GFP_KERNEL);
if (!mcg)
return ERR_PTR(-ENOMEM);
- spin_lock_irqsave(&rxe->mcg_lock, flags);
+ spin_lock_bh(&rxe->mcg_lock);
/* re-check to see if someone else just added it */
tmp = __rxe_lookup_mcg(rxe, mgid);
if (tmp) {
+ spin_unlock_bh(&rxe->mcg_lock);
+ atomic_dec(&rxe->mcg_num);
kfree(mcg);
- mcg = tmp;
- goto out;
+ return tmp;
}
- if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) {
- err = -ENOMEM;
- goto err_dec;
- }
+ __rxe_init_mcg(rxe, mgid, mcg);
+ spin_unlock_bh(&rxe->mcg_lock);
- err = __rxe_init_mcg(rxe, mgid, mcg);
- if (err)
- goto err_dec;
-out:
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
- return mcg;
+ /* add mcast address outside of lock */
+ err = rxe_mcast_add(rxe, mgid);
+ if (!err)
+ return mcg;
+ kfree(mcg);
err_dec:
atomic_dec(&rxe->mcg_num);
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
- kfree(mcg);
return ERR_PTR(err);
}
@@ -268,7 +259,6 @@ static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
__rxe_remove_mcg(mcg);
kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
- rxe_mcast_delete(mcg->rxe, &mcg->mgid);
atomic_dec(&rxe->mcg_num);
}
@@ -280,11 +270,12 @@ static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
*/
static void rxe_destroy_mcg(struct rxe_mcg *mcg)
{
- unsigned long flags;
+ /* delete mcast address outside of lock */
+ rxe_mcast_del(mcg->rxe, &mcg->mgid);
- spin_lock_irqsave(&mcg->rxe->mcg_lock, flags);
+ spin_lock_bh(&mcg->rxe->mcg_lock);
__rxe_destroy_mcg(mcg);
- spin_unlock_irqrestore(&mcg->rxe->mcg_lock, flags);
+ spin_unlock_bh(&mcg->rxe->mcg_lock);
}
/**
@@ -339,25 +330,24 @@ static int rxe_attach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
{
struct rxe_dev *rxe = mcg->rxe;
struct rxe_mca *mca, *tmp;
- unsigned long flags;
int err;
/* check to see if the qp is already a member of the group */
- spin_lock_irqsave(&rxe->mcg_lock, flags);
+ spin_lock_bh(&rxe->mcg_lock);
list_for_each_entry(mca, &mcg->qp_list, qp_list) {
if (mca->qp == qp) {
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ spin_unlock_bh(&rxe->mcg_lock);
return 0;
}
}
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ spin_unlock_bh(&rxe->mcg_lock);
/* speculative alloc new mca without using GFP_ATOMIC */
mca = kzalloc(sizeof(*mca), GFP_KERNEL);
if (!mca)
return -ENOMEM;
- spin_lock_irqsave(&rxe->mcg_lock, flags);
+ spin_lock_bh(&rxe->mcg_lock);
/* re-check to see if someone else just attached qp */
list_for_each_entry(tmp, &mcg->qp_list, qp_list) {
if (tmp->qp == qp) {
@@ -371,7 +361,7 @@ static int rxe_attach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
if (err)
kfree(mca);
out:
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ spin_unlock_bh(&rxe->mcg_lock);
return err;
}
@@ -405,9 +395,8 @@ static int rxe_detach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
{
struct rxe_dev *rxe = mcg->rxe;
struct rxe_mca *mca, *tmp;
- unsigned long flags;
- spin_lock_irqsave(&rxe->mcg_lock, flags);
+ spin_lock_bh(&rxe->mcg_lock);
list_for_each_entry_safe(mca, tmp, &mcg->qp_list, qp_list) {
if (mca->qp == qp) {
__rxe_cleanup_mca(mca, mcg);
@@ -421,13 +410,13 @@ static int rxe_detach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
if (atomic_read(&mcg->qp_num) <= 0)
__rxe_destroy_mcg(mcg);
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ spin_unlock_bh(&rxe->mcg_lock);
return 0;
}
}
/* we didn't find the qp on the list */
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ spin_unlock_bh(&rxe->mcg_lock);
return -EINVAL;
}
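
The rework above is the standard pattern for doing sleepable work around a spinlock: allocate speculatively with GFP_KERNEL before taking the (BH-disabling) lock, re-check for a racing insertion under the lock, and move other sleepable calls such as rxe_mcast_add() outside it. Condensed sketch (lookup() and insert() are hypothetical helpers):

	new = kzalloc(sizeof(*new), GFP_KERNEL); /* may sleep: no lock held */
	if (!new)
		return ERR_PTR(-ENOMEM);

	spin_lock_bh(&rxe->mcg_lock);
	old = lookup(rxe, mgid);	/* did someone beat us to it? */
	if (old) {
		spin_unlock_bh(&rxe->mcg_lock);
		kfree(new);
		return old;
	}
	insert(rxe, new);
	spin_unlock_bh(&rxe->mcg_lock);
	return new;
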
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 16fc7ea1298d..9cd0eaff98de 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -680,6 +680,11 @@ static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
* It is assumed that the access permissions, if originally good,
* are still OK and that the mappings are unchanged.
*
+ * TODO: If someone reregisters an MR to change its size or
+ * access permissions during the processing of an RDMA read
+ * we should kill the responder resource and complete the
+ * operation with an error.
+ *
* Return: mr on success else NULL
*/
static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
@@ -690,23 +695,27 @@ static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
if (rkey_is_mw(rkey)) {
mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
- if (!mw || mw->rkey != rkey)
+ if (!mw)
return NULL;
- if (mw->state != RXE_MW_STATE_VALID) {
+ mr = mw->mr;
+ if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID ||
+ !mr || mr->state != RXE_MR_STATE_VALID) {
rxe_put(mw);
return NULL;
}
- mr = mw->mr;
+ rxe_get(mr);
rxe_put(mw);
- } else {
- mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
- if (!mr || mr->rkey != rkey)
- return NULL;
+
+ return mr;
}
- if (mr->state != RXE_MR_STATE_VALID) {
+ mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
+ if (!mr)
+ return NULL;
+
+ if (mr->rkey != rkey || mr->state != RXE_MR_STATE_VALID) {
rxe_put(mr);
return NULL;
}
@@ -736,8 +745,14 @@ static enum resp_states read_reply(struct rxe_qp *qp,
}
if (res->state == rdatm_res_state_new) {
- mr = qp->resp.mr;
- qp->resp.mr = NULL;
+ if (!res->replay) {
+ mr = qp->resp.mr;
+ qp->resp.mr = NULL;
+ } else {
+ mr = rxe_recheck_mr(qp, res->read.rkey);
+ if (!mr)
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
if (res->read.resid <= mtu)
opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 7acdd3c3a599..17f34d584cd9 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -968,14 +968,15 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_set_inuse(new_cep);
rv = siw_proc_mpareq(new_cep);
- siw_cep_set_free(new_cep);
-
if (rv != -EAGAIN) {
siw_cep_put(cep);
new_cep->listen_cep = NULL;
- if (rv)
+ if (rv) {
+ siw_cep_set_free(new_cep);
goto error;
+ }
}
+ siw_cep_set_free(new_cep);
}
return;
diff --git a/drivers/input/keyboard/cypress-sf.c b/drivers/input/keyboard/cypress-sf.c
index c28996028e80..9a23eed6a4f4 100644
--- a/drivers/input/keyboard/cypress-sf.c
+++ b/drivers/input/keyboard/cypress-sf.c
@@ -61,6 +61,14 @@ static irqreturn_t cypress_sf_irq_handler(int irq, void *devid)
return IRQ_HANDLED;
}
+static void cypress_sf_disable_regulators(void *arg)
+{
+ struct cypress_sf_data *touchkey = arg;
+
+ regulator_bulk_disable(ARRAY_SIZE(touchkey->regulators),
+ touchkey->regulators);
+}
+
static int cypress_sf_probe(struct i2c_client *client)
{
struct cypress_sf_data *touchkey;
@@ -121,6 +129,12 @@ static int cypress_sf_probe(struct i2c_client *client)
return error;
}
+ error = devm_add_action_or_reset(&client->dev,
+ cypress_sf_disable_regulators,
+ touchkey);
+ if (error)
+ return error;
+
touchkey->input_dev = devm_input_allocate_device(&client->dev);
if (!touchkey->input_dev) {
dev_err(&client->dev, "Failed to allocate input device\n");
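
devm_add_action_or_reset() ties the regulator shutdown to the device's managed-cleanup list: the callback runs automatically on probe failure or driver detach, and if registering the action itself fails the callback is invoked immediately, so the regulators are never left enabled. The ordering rule, as a sketch (regs, disable_regs_cb and ctx are placeholders):

	/* Register the cleanup right after the enable succeeds, so every
	 * later failure path is covered without explicit unwinding. */
	error = regulator_bulk_enable(ARRAY_SIZE(regs), regs);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, disable_regs_cb, ctx);
	if (error)
		return error;	/* disable_regs_cb has already run */
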
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 43375b38ee59..8a7ce41b8c56 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -393,7 +393,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
* revision register.
*/
error = pm_runtime_get_sync(dev);
- if (error) {
+ if (error < 0) {
dev_err(dev, "pm_runtime_get_sync() failed\n");
pm_runtime_put_noidle(dev);
return error;
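
pm_runtime_get_sync() returns 1, not 0, when the device was already active, so the old "if (error)" treated a successful warm resume as a failure; only negative values are errors. Newer code often sidesteps the pitfall with pm_runtime_resume_and_get(), sketched here, which returns 0 or a negative error and drops the usage count on failure itself:

	error = pm_runtime_resume_and_get(dev);
	if (error)
		return error;	/* no pm_runtime_put_noidle() needed */
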
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 2bd407d86bae..e9bd36adbe47 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -756,15 +756,12 @@ static int ili251x_firmware_reset(struct i2c_client *client)
return ili251x_firmware_busy(client);
}
-static void ili251x_hardware_reset(struct device *dev)
+static void ili210x_hardware_reset(struct gpio_desc *reset_gpio)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ili210x *priv = i2c_get_clientdata(client);
-
/* Reset the controller */
- gpiod_set_value_cansleep(priv->reset_gpio, 1);
- usleep_range(10000, 15000);
- gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ gpiod_set_value_cansleep(reset_gpio, 1);
+ usleep_range(12000, 15000);
+ gpiod_set_value_cansleep(reset_gpio, 0);
msleep(300);
}
@@ -773,6 +770,7 @@ static ssize_t ili210x_firmware_update_store(struct device *dev,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
const char *fwname = ILI251X_FW_FILENAME;
const struct firmware *fw;
u16 ac_end, df_end;
@@ -803,7 +801,7 @@ static ssize_t ili210x_firmware_update_store(struct device *dev,
dev_dbg(dev, "Firmware update started, firmware=%s\n", fwname);
- ili251x_hardware_reset(dev);
+ ili210x_hardware_reset(priv->reset_gpio);
error = ili251x_firmware_reset(client);
if (error)
@@ -858,7 +856,7 @@ static ssize_t ili210x_firmware_update_store(struct device *dev,
error = count;
exit:
- ili251x_hardware_reset(dev);
+ ili210x_hardware_reset(priv->reset_gpio);
dev_dbg(dev, "Firmware update ended, error=%i\n", error);
enable_irq(client->irq);
kfree(fwbuf);
@@ -951,9 +949,7 @@ static int ili210x_i2c_probe(struct i2c_client *client,
if (error)
return error;
- usleep_range(50, 100);
- gpiod_set_value_cansleep(reset_gpio, 0);
- msleep(100);
+ ili210x_hardware_reset(reset_gpio);
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 9050ca1f4285..808f6e7a8048 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -1087,9 +1087,15 @@ static int of_count_icc_providers(struct device_node *np)
{
struct device_node *child;
int count = 0;
+ const struct of_device_id __maybe_unused ignore_list[] = {
+ { .compatible = "qcom,sc7180-ipa-virt" },
+ { .compatible = "qcom,sdx55-ipa-virt" },
+ {}
+ };
for_each_available_child_of_node(np, child) {
- if (of_property_read_bool(child, "#interconnect-cells"))
+ if (of_property_read_bool(child, "#interconnect-cells") &&
+ likely(!of_match_node(ignore_list, child)))
count++;
count += of_count_icc_providers(child);
}
diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
index 12d59c36df53..5f7c0f85fa8e 100644
--- a/drivers/interconnect/qcom/sc7180.c
+++ b/drivers/interconnect/qcom/sc7180.c
@@ -47,7 +47,6 @@ DEFINE_QNODE(qnm_mnoc_sf, SC7180_MASTER_MNOC_SF_MEM_NOC, 1, 32, SC7180_SLAVE_GEM
DEFINE_QNODE(qnm_snoc_gc, SC7180_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC7180_SLAVE_LLCC);
DEFINE_QNODE(qnm_snoc_sf, SC7180_MASTER_SNOC_SF_MEM_NOC, 1, 16, SC7180_SLAVE_LLCC);
DEFINE_QNODE(qxm_gpu, SC7180_MASTER_GFX3D, 2, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
-DEFINE_QNODE(ipa_core_master, SC7180_MASTER_IPA_CORE, 1, 8, SC7180_SLAVE_IPA_CORE);
DEFINE_QNODE(llcc_mc, SC7180_MASTER_LLCC, 2, 4, SC7180_SLAVE_EBI1);
DEFINE_QNODE(qhm_mnoc_cfg, SC7180_MASTER_CNOC_MNOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_MNOC);
DEFINE_QNODE(qxm_camnoc_hf0, SC7180_MASTER_CAMNOC_HF0, 2, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC);
@@ -129,7 +128,6 @@ DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SC7180_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
DEFINE_QNODE(qns_gem_noc_snoc, SC7180_SLAVE_GEM_NOC_SNOC, 1, 8, SC7180_MASTER_GEM_NOC_SNOC);
DEFINE_QNODE(qns_llcc, SC7180_SLAVE_LLCC, 1, 16, SC7180_MASTER_LLCC);
DEFINE_QNODE(srvc_gemnoc, SC7180_SLAVE_SERVICE_GEM_NOC, 1, 4);
-DEFINE_QNODE(ipa_core_slave, SC7180_SLAVE_IPA_CORE, 1, 8);
DEFINE_QNODE(ebi, SC7180_SLAVE_EBI1, 2, 4);
DEFINE_QNODE(qns_mem_noc_hf, SC7180_SLAVE_MNOC_HF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_HF_MEM_NOC);
DEFINE_QNODE(qns_mem_noc_sf, SC7180_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_SF_MEM_NOC);
@@ -160,7 +158,6 @@ DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aop, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_rt_throttle_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_npu_dma_throttle_cfg, &qhs_npu_dsp_throttle_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm_1, &qhs_tlmm_2, &qhs_tlmm_3, &qhs_ufs_mem_cfg, &qhs_usb3, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qhm_mnoc_cfg, &qxm_mdp0, &qxm_rot, &qxm_venus0, &qxm_venus_arm9);
DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
@@ -372,22 +369,6 @@ static struct qcom_icc_desc sc7180_gem_noc = {
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
- &bcm_ip0,
-};
-
-static struct qcom_icc_node *ipa_virt_nodes[] = {
- [MASTER_IPA_CORE] = &ipa_core_master,
- [SLAVE_IPA_CORE] = &ipa_core_slave,
-};
-
-static struct qcom_icc_desc sc7180_ipa_virt = {
- .nodes = ipa_virt_nodes,
- .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
- .bcms = ipa_virt_bcms,
- .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
-};
-
static struct qcom_icc_bcm *mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
@@ -519,8 +500,6 @@ static const struct of_device_id qnoc_of_match[] = {
.data = &sc7180_dc_noc},
{ .compatible = "qcom,sc7180-gem-noc",
.data = &sc7180_gem_noc},
- { .compatible = "qcom,sc7180-ipa-virt",
- .data = &sc7180_ipa_virt},
{ .compatible = "qcom,sc7180-mc-virt",
.data = &sc7180_mc_virt},
{ .compatible = "qcom,sc7180-mmss-noc",
diff --git a/drivers/interconnect/qcom/sdx55.c b/drivers/interconnect/qcom/sdx55.c
index 03d604f84cc5..e3ac25a997b7 100644
--- a/drivers/interconnect/qcom/sdx55.c
+++ b/drivers/interconnect/qcom/sdx55.c
@@ -18,7 +18,6 @@
#include "icc-rpmh.h"
#include "sdx55.h"
-DEFINE_QNODE(ipa_core_master, SDX55_MASTER_IPA_CORE, 1, 8, SDX55_SLAVE_IPA_CORE);
DEFINE_QNODE(llcc_mc, SDX55_MASTER_LLCC, 4, 4, SDX55_SLAVE_EBI_CH0);
DEFINE_QNODE(acm_tcu, SDX55_MASTER_TCU_0, 1, 8, SDX55_SLAVE_LLCC, SDX55_SLAVE_MEM_NOC_SNOC, SDX55_SLAVE_MEM_NOC_PCIE_SNOC);
DEFINE_QNODE(qnm_snoc_gc, SDX55_MASTER_SNOC_GC_MEM_NOC, 1, 8, SDX55_SLAVE_LLCC);
@@ -40,7 +39,6 @@ DEFINE_QNODE(xm_pcie, SDX55_MASTER_PCIE, 1, 8, SDX55_SLAVE_ANOC_SNOC);
DEFINE_QNODE(xm_qdss_etr, SDX55_MASTER_QDSS_ETR, 1, 8, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_AOSS, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
DEFINE_QNODE(xm_sdc1, SDX55_MASTER_SDCC_1, 1, 8, SDX55_SLAVE_AOSS, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP, SDX55_SLAVE_AUDIO);
DEFINE_QNODE(xm_usb3, SDX55_MASTER_USB3, 1, 8, SDX55_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(ipa_core_slave, SDX55_SLAVE_IPA_CORE, 1, 8);
DEFINE_QNODE(ebi, SDX55_SLAVE_EBI_CH0, 1, 4);
DEFINE_QNODE(qns_llcc, SDX55_SLAVE_LLCC, 1, 16, SDX55_SLAVE_EBI_CH0);
DEFINE_QNODE(qns_memnoc_snoc, SDX55_SLAVE_MEM_NOC_SNOC, 1, 8, SDX55_MASTER_MEM_NOC_SNOC);
@@ -82,7 +80,6 @@ DEFINE_QNODE(xs_sys_tcu_cfg, SDX55_SLAVE_TCU, 1, 8);
DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
DEFINE_QBCM(bcm_pn0, "PN0", false, &qhm_snoc_cfg);
DEFINE_QBCM(bcm_sh3, "SH3", false, &xm_apps_rdwr);
DEFINE_QBCM(bcm_sh4, "SH4", false, &qns_memnoc_snoc, &qns_sys_pcie);
@@ -219,22 +216,6 @@ static const struct qcom_icc_desc sdx55_system_noc = {
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
- &bcm_ip0,
-};
-
-static struct qcom_icc_node *ipa_virt_nodes[] = {
- [MASTER_IPA_CORE] = &ipa_core_master,
- [SLAVE_IPA_CORE] = &ipa_core_slave,
-};
-
-static const struct qcom_icc_desc sdx55_ipa_virt = {
- .nodes = ipa_virt_nodes,
- .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
- .bcms = ipa_virt_bcms,
- .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
-};
-
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sdx55-mc-virt",
.data = &sdx55_mc_virt},
@@ -242,8 +223,6 @@ static const struct of_device_id qnoc_of_match[] = {
.data = &sdx55_mem_noc},
{ .compatible = "qcom,sdx55-system-noc",
.data = &sdx55_system_noc},
- { .compatible = "qcom,sdx55-ipa-virt",
- .data = &sdx55_ipa_virt},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index decafb07ad08..8af0242a90d9 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -773,6 +773,7 @@ static const struct iommu_ops apple_dart_iommu_ops = {
.get_resv_regions = apple_dart_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during dart probe */
+ .owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = apple_dart_attach_dev,
.detach_dev = apple_dart_detach_dev,
@@ -859,16 +860,15 @@ static int apple_dart_probe(struct platform_device *pdev)
dart->dev = dev;
spin_lock_init(&dart->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(dart->regs))
+ return PTR_ERR(dart->regs);
+
if (resource_size(res) < 0x4000) {
dev_err(dev, "MMIO region too small (%pr)\n", res);
return -EINVAL;
}
- dart->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(dart->regs))
- return PTR_ERR(dart->regs);
-
dart->irq = platform_get_irq(pdev, 0);
if (dart->irq < 0)
return -ENODEV;
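The apple-dart hunk swaps the platform_get_resource()/devm_ioremap_resource() pair for devm_platform_get_and_ioremap_resource(), which maps the region and still hands back the struct resource so the size check can run afterwards. A minimal probe-shaped sketch of the pattern; the foo_ name is hypothetical, not apple-dart itself:

```c
/* Hedged sketch of the consolidated mapping pattern. */
static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	/* Looks up MEM resource 0, requests and ioremaps it, and also
	 * returns the resource via @res for further sanity checks. */
	regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	if (resource_size(res) < 0x4000)
		return -EINVAL;

	return 0;
}
```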
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 22ddd05bbdcd..c623dae1e115 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -183,7 +183,14 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
{
struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
- size_t size = end - start + 1;
+ size_t size;
+
+ /*
+ * mm_types defines vm_end as the first byte after the end address,
+ * whereas the IOMMU subsystem uses the last address of an address
+ * range. Translate here by computing the size accordingly.
+ */
+ size = end - start;
if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
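The fix rests on mmu_notifier ranges being end-exclusive while the IOMMU side works with sizes, so the old `end - start + 1` over-invalidated by one byte. A tiny standalone check of the two computations:

```c
#include <assert.h>
#include <stddef.h>

int main(void)
{
	/* A 4 KiB VMA: [0x1000, 0x2000), exclusive end per mm_types. */
	unsigned long start = 0x1000, end = 0x2000;

	size_t size = end - start;     /* correct: 0x1000 */
	size_t old = end - start + 1;  /* old code: off by one */

	assert(size == 0x1000 && old == 0x1001);
	return 0;
}
```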
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 01e9b50b10a1..87bf522b9d2e 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -258,6 +258,34 @@ static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct devi
dev_name(dev), err);
}
+static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg,
+ struct device *dev)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ const struct device_node *np = smmu->dev->of_node;
+
+ /*
+ * Tegra194 and Tegra234 SoCs have an erratum that causes walk cache
+ * entries to not be invalidated correctly. The problem is that the walk
+ * cache index generated for an IOVA is not the same across translation
+ * and invalidation requests. This leads to page faults when a PMD entry
+ * is released during unmap and repopulated with a new PTE table during
+ * a subsequent map request. Disabling large page mappings avoids the
+ * release of the PMD entry and avoids translations seeing a stale PMD
+ * entry in the walk cache.
+ * Fix this by limiting the page mappings to PAGE_SIZE on Tegra194 and
+ * Tegra234.
+ */
+ if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
+ of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
+ smmu->pgsize_bitmap = PAGE_SIZE;
+ pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
+ }
+
+ return 0;
+}
+
static const struct arm_smmu_impl nvidia_smmu_impl = {
.read_reg = nvidia_smmu_read_reg,
.write_reg = nvidia_smmu_write_reg,
@@ -268,10 +296,12 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
.global_fault = nvidia_smmu_global_fault,
.context_fault = nvidia_smmu_context_fault,
.probe_finalize = nvidia_smmu_probe_finalize,
+ .init_context = nvidia_smmu_init_context,
};
static const struct arm_smmu_impl nvidia_smmu_single_impl = {
.probe_finalize = nvidia_smmu_probe_finalize,
+ .init_context = nvidia_smmu_init_context,
};
struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index df5c62ecf942..0ea47e17b379 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1588,7 +1588,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
unsigned long pfn, unsigned int pages,
int ih, int map)
{
- unsigned int mask = ilog2(__roundup_pow_of_two(pages));
+ unsigned int aligned_pages = __roundup_pow_of_two(pages);
+ unsigned int mask = ilog2(aligned_pages);
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
u16 did = domain->iommu_did[iommu->seq_id];
@@ -1600,10 +1601,30 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
if (domain_use_first_level(domain)) {
qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih);
} else {
+ unsigned long bitmask = aligned_pages - 1;
+
+ /*
+ * PSI masks the low order bits of the base address. If the
+ * address isn't aligned to the mask, then compute a mask value
+ * needed to ensure the target range is flushed.
+ */
+ if (unlikely(bitmask & pfn)) {
+ unsigned long end_pfn = pfn + pages - 1, shared_bits;
+
+ /*
+ * Since end_pfn <= pfn + bitmask, the only way bits
+ * higher than bitmask can differ in pfn and end_pfn is
+ * by carrying. This means after masking out bitmask,
+ * high bits starting with the first set bit in
+ * shared_bits are all equal in both pfn and end_pfn.
+ */
+ shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
+ mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
+ }
+
/*
* Fallback to domain selective flush if no PSI support or
- * the size is too big. PSI requires page size to be 2 ^ x,
- * and the base address is naturally aligned to the size.
+ * the size is too big.
*/
if (!cap_pgsel_inv(iommu->cap) ||
mask > cap_max_amask_val(iommu->cap))
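The new unaligned-PSI logic widens the flush mask until one naturally aligned region covers [pfn, pfn + pages). The key identity: any bit above `bitmask` that differs between pfn and end_pfn can only come from a carry, so the lowest set bit of `~(pfn ^ end_pfn) & ~bitmask` marks where the two agree again. A runnable userspace sketch of the computation, with __builtin_ctzl standing in for the kernel's __ffs/ilog2:

```c
#include <assert.h>
#include <stdio.h>

/* Round up to the next power of two (enough for this demo). */
static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int psi_mask(unsigned long pfn, unsigned long pages)
{
	unsigned long aligned_pages = roundup_pow_of_two(pages);
	unsigned long bitmask = aligned_pages - 1;
	/* ilog2 of a power of two == count of trailing zeros. */
	unsigned int mask = __builtin_ctzl(aligned_pages);

	if (bitmask & pfn) {
		unsigned long end_pfn = pfn + pages - 1;
		/* Bits above bitmask on which pfn and end_pfn agree. */
		unsigned long shared_bits = ~(pfn ^ end_pfn) & ~bitmask;

		mask = shared_bits ? __builtin_ctzl(shared_bits) : 64;
	}
	return mask;
}

int main(void)
{
	/* Aligned base: mask stays at ilog2(pages). */
	assert(psi_mask(0x100, 4) == 2);
	/* Unaligned base: widen to an 8-page region covering 0x101..0x104. */
	assert(psi_mask(0x101, 4) == 3);
	printf("ok\n");
	return 0;
}
```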
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 23a38763c1d1..7ee37d996e15 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -757,6 +757,10 @@ bad_req:
goto bad_req;
}
+ /* Drop Stop Marker message. No need for a response. */
+ if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
+ goto prq_advance;
+
if (!svm || svm->pasid != req->pasid) {
/*
* It can't go away, because the driver is not permitted
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f2c45b85b9fc..857d4c2fd1a2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -506,6 +506,13 @@ int iommu_get_group_resv_regions(struct iommu_group *group,
list_for_each_entry(device, &group->devices, list) {
struct list_head dev_resv_regions;
+ /*
+ * Non-API groups still expose reserved_regions in sysfs,
+ * so filter out calls that get here that way.
+ */
+ if (!device->dev->iommu)
+ break;
+
INIT_LIST_HEAD(&dev_resv_regions);
iommu_get_resv_regions(device->dev, &dev_resv_regions);
ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
@@ -3019,7 +3026,7 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- if (WARN_ON(!group))
+ if (WARN_ON(!group) || !group->default_domain)
return -EINVAL;
if (sysfs_streq(buf, "identity"))
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 4aab631ef517..d9cf2820c02e 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1661,7 +1661,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
sizeof(phandle));
if (num_iommus < 0)
- return 0;
+ return ERR_PTR(-ENODEV);
arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
if (!arch_data)
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 680d2fcf2686..135c156673a7 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -257,6 +257,18 @@ config ST_IRQCHIP
help
Enables SysCfg Controlled IRQs on STi based platforms.
+config SUN4I_INTC
+ bool
+
+config SUN6I_R_INTC
+ bool
+ select IRQ_DOMAIN_HIERARCHY
+ select IRQ_FASTEOI_HIERARCHY_HANDLERS
+
+config SUNXI_NMI_INTC
+ bool
+ select GENERIC_IRQ_CHIP
+
config TB10X_IRQC
bool
select IRQ_DOMAIN
@@ -433,6 +445,7 @@ config QCOM_PDC
config QCOM_MPM
tristate "QCOM MPM"
depends on ARCH_QCOM
+ depends on MAILBOX
select IRQ_DOMAIN_HIERARCHY
help
MSM Power Manager driver to manage and configure wakeup
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 160a1d8ceaa9..9b1ffb0f98cc 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -23,9 +23,9 @@ obj-$(CONFIG_OMPIC) += irq-ompic.o
obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o
obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
obj-$(CONFIG_OMAP_IRQCHIP) += irq-omap-intc.o
-obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
-obj-$(CONFIG_ARCH_SUNXI) += irq-sun6i-r.o
-obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o
+obj-$(CONFIG_SUN4I_INTC) += irq-sun4i.o
+obj-$(CONFIG_SUN6I_R_INTC) += irq-sun6i-r.o
+obj-$(CONFIG_SUNXI_NMI_INTC) += irq-sunxi-nmi.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 5b8d571c041d..ee18eb3e72b7 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -209,15 +209,29 @@ static struct msi_domain_info armada_370_xp_msi_domain_info = {
static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
+ unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(data));
+
msg->address_lo = lower_32_bits(msi_doorbell_addr);
msg->address_hi = upper_32_bits(msi_doorbell_addr);
- msg->data = 0xf00 | (data->hwirq + PCI_MSI_DOORBELL_START);
+ msg->data = BIT(cpu + 8) | (data->hwirq + PCI_MSI_DOORBELL_START);
}
static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
const struct cpumask *mask, bool force)
{
- return -EINVAL;
+ unsigned int cpu;
+
+ if (!force)
+ cpu = cpumask_any_and(mask, cpu_online_mask);
+ else
+ cpu = cpumask_first(mask);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK;
}
static struct irq_chip armada_370_xp_msi_bottom_irq_chip = {
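With per-CPU MSI targeting, the doorbell data word now encodes the destination CPU as a single bit in [11:8] next to the doorbell number, where the old code hardwired 0xf00 (all four CPUs at once). A small standalone demo of the encoding; the PCI_MSI_DOORBELL_START value of 16 is an assumption chosen to match the driver's doorbell layout:

```c
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
/* Assumed to match the driver's doorbell layout. */
#define PCI_MSI_DOORBELL_START	16

/* MSI data: one CPU-select bit in [11:8] plus the doorbell number. */
static uint32_t msi_data(unsigned int cpu, unsigned long hwirq)
{
	return BIT(cpu + 8) | (hwirq + PCI_MSI_DOORBELL_START);
}

int main(void)
{
	printf("cpu0 hwirq0: 0x%x\n", msi_data(0, 0)); /* 0x110 */
	printf("cpu2 hwirq3: 0x%x\n", msi_data(2, 3)); /* 0x413 */
	return 0;
}
```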
@@ -264,11 +278,21 @@ static const struct irq_domain_ops armada_370_xp_msi_domain_ops = {
.free = armada_370_xp_msi_free,
};
-static int armada_370_xp_msi_init(struct device_node *node,
- phys_addr_t main_int_phys_base)
+static void armada_370_xp_msi_reenable_percpu(void)
{
u32 reg;
+ /* Enable MSI doorbell mask and combined cpu local interrupt */
+ reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
+ | PCI_MSI_DOORBELL_MASK;
+ writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ /* Unmask local doorbell interrupt */
+ writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+}
+
+static int armada_370_xp_msi_init(struct device_node *node,
+ phys_addr_t main_int_phys_base)
+{
msi_doorbell_addr = main_int_phys_base +
ARMADA_370_XP_SW_TRIG_INT_OFFS;
@@ -287,18 +311,13 @@ static int armada_370_xp_msi_init(struct device_node *node,
return -ENOMEM;
}
- reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
- | PCI_MSI_DOORBELL_MASK;
-
- writel(reg, per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
-
- /* Unmask IPI interrupt */
- writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ armada_370_xp_msi_reenable_percpu();
return 0;
}
#else
+static void armada_370_xp_msi_reenable_percpu(void) {}
+
static inline int armada_370_xp_msi_init(struct device_node *node,
phys_addr_t main_int_phys_base)
{
@@ -308,7 +327,16 @@ static inline int armada_370_xp_msi_init(struct device_node *node,
static void armada_xp_mpic_perf_init(void)
{
- unsigned long cpuid = cpu_logical_map(smp_processor_id());
+ unsigned long cpuid;
+
+ /*
+ * This Performance Counter Overflow interrupt is specific for
+ * Armada 370 and XP. It is not available on Armada 375, 38x and 39x.
+ */
+ if (!of_machine_is_compatible("marvell,armada-370-xp"))
+ return;
+
+ cpuid = cpu_logical_map(smp_processor_id());
/* Enable Performance Counter Overflow interrupts */
writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
@@ -501,6 +529,8 @@ static void armada_xp_mpic_reenable_percpu(void)
}
ipi_resume();
+
+ armada_370_xp_msi_reenable_percpu();
}
static int armada_xp_mpic_starting_cpu(unsigned int cpu)
diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c
index a47db16ff960..9c9fc3e2967e 100644
--- a/drivers/irqchip/irq-aspeed-i2c-ic.c
+++ b/drivers/irqchip/irq-aspeed-i2c-ic.c
@@ -77,8 +77,8 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node,
}
i2c_ic->parent_irq = irq_of_parse_and_map(node, 0);
- if (i2c_ic->parent_irq < 0) {
- ret = i2c_ic->parent_irq;
+ if (!i2c_ic->parent_irq) {
+ ret = -EINVAL;
goto err_iounmap;
}
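This fix, and the scu-ic one just below, correct the same API misuse: irq_of_parse_and_map() returns an unsigned int and reports failure as 0, so a `< 0` test can never trigger. A hedged sketch of the correct check:

```c
/* irq_of_parse_and_map() returns 0, not a negative errno, when no
 * mapping exists, so test for zero and pick an errno explicitly. */
unsigned int irq = irq_of_parse_and_map(node, 0);
if (!irq)
	return -EINVAL;	/* or -ENODEV, per driver convention */
```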
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
index 18b77c3e6db4..279e92cf0b16 100644
--- a/drivers/irqchip/irq-aspeed-scu-ic.c
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -157,8 +157,8 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
}
irq = irq_of_parse_and_map(node, 0);
- if (irq < 0) {
- rc = irq;
+ if (!irq) {
+ rc = -EINVAL;
goto err;
}
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index fd079215c17f..142a7431745f 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -315,7 +315,7 @@ static int __init bcm6345_l1_of_init(struct device_node *dn,
cpumask_set_cpu(idx, &intc->cpumask);
}
- if (!cpumask_weight(&intc->cpumask)) {
+ if (cpumask_empty(&intc->cpumask)) {
ret = -ENODEV;
goto out_free;
}
diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c
index d36f536506ba..42d8a2438ebc 100644
--- a/drivers/irqchip/irq-csky-apb-intc.c
+++ b/drivers/irqchip/irq-csky-apb-intc.c
@@ -136,11 +136,11 @@ static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq,
u32 irq_base)
{
if (hwirq == 0)
- return 0;
+ return false;
generic_handle_domain_irq(root_domain, irq_base + __fls(hwirq));
- return 1;
+ return true;
}
/* gx6605s 64 irqs interrupt controller */
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index cd772973114a..5ff09de6c48f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1624,7 +1624,7 @@ static int its_select_cpu(struct irq_data *d,
cpu = cpumask_pick_least_loaded(d, tmpmask);
} else {
- cpumask_and(tmpmask, irq_data_get_affinity_mask(d), cpu_online_mask);
+ cpumask_copy(tmpmask, aff_mask);
/* If we cannot cross sockets, limit the search to that node */
if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
@@ -3011,18 +3011,12 @@ static int __init allocate_lpi_tables(void)
return 0;
}
-static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
+static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
{
u32 count = 1000000; /* 1s! */
bool clean;
u64 val;
- val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
- val &= ~GICR_VPENDBASER_Valid;
- val &= ~clr;
- val |= set;
- gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
do {
val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
clean = !(val & GICR_VPENDBASER_Dirty);
@@ -3033,10 +3027,26 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
}
} while (!clean && count);
- if (unlikely(val & GICR_VPENDBASER_Dirty)) {
+ if (unlikely(!clean))
pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+
+ return val;
+}
+
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
+{
+ u64 val;
+
+ /* Make sure we wait until the RD is done with the initial scan */
+ val = read_vpend_dirty_clear(vlpi_base);
+ val &= ~GICR_VPENDBASER_Valid;
+ val &= ~clr;
+ val |= set;
+ gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+ val = read_vpend_dirty_clear(vlpi_base);
+ if (unlikely(val & GICR_VPENDBASER_Dirty))
val |= GICR_VPENDBASER_PendingLast;
- }
return val;
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 0efe1a9a9f3b..2be8dea6b6b0 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -206,11 +206,11 @@ static inline void __iomem *gic_dist_base(struct irq_data *d)
}
}
-static void gic_do_wait_for_rwp(void __iomem *base)
+static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
u32 count = 1000000; /* 1s! */
- while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+ while (readl_relaxed(base + GICD_CTLR) & bit) {
count--;
if (!count) {
pr_err_ratelimited("RWP timeout, gone fishing\n");
@@ -224,13 +224,13 @@ static void gic_do_wait_for_rwp(void __iomem *base)
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
- gic_do_wait_for_rwp(gic_data.dist_base);
+ gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}
/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
- gic_do_wait_for_rwp(gic_data_rdist_rd_base());
+ gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}
#ifdef CONFIG_ARM64
@@ -352,28 +352,27 @@ static int gic_peek_irq(struct irq_data *d, u32 offset)
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
- void (*rwp_wait)(void);
void __iomem *base;
u32 index, mask;
offset = convert_offset_index(d, offset, &index);
mask = 1 << (index % 32);
- if (gic_irq_in_rdist(d)) {
+ if (gic_irq_in_rdist(d))
base = gic_data_rdist_sgi_base();
- rwp_wait = gic_redist_wait_for_rwp;
- } else {
+ else
base = gic_data.dist_base;
- rwp_wait = gic_dist_wait_for_rwp;
- }
writel_relaxed(mask, base + offset + (index / 32) * 4);
- rwp_wait();
}
static void gic_mask_irq(struct irq_data *d)
{
gic_poke_irq(d, GICD_ICENABLER);
+ if (gic_irq_in_rdist(d))
+ gic_redist_wait_for_rwp();
+ else
+ gic_dist_wait_for_rwp();
}
static void gic_eoimode1_mask_irq(struct irq_data *d)
@@ -420,7 +419,11 @@ static int gic_irq_set_irqchip_state(struct irq_data *d,
break;
case IRQCHIP_STATE_MASKED:
- reg = val ? GICD_ICENABLER : GICD_ISENABLER;
+ if (val) {
+ gic_mask_irq(d);
+ return 0;
+ }
+ reg = GICD_ISENABLER;
break;
default:
@@ -556,7 +559,8 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
static void gic_eoi_irq(struct irq_data *d)
{
- gic_write_eoir(gic_irq(d));
+ write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
+ isb();
}
static void gic_eoimode1_eoi_irq(struct irq_data *d)
@@ -574,7 +578,6 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
{
enum gic_intid_range range;
unsigned int irq = gic_irq(d);
- void (*rwp_wait)(void);
void __iomem *base;
u32 offset, index;
int ret;
@@ -590,17 +593,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
return -EINVAL;
- if (gic_irq_in_rdist(d)) {
+ if (gic_irq_in_rdist(d))
base = gic_data_rdist_sgi_base();
- rwp_wait = gic_redist_wait_for_rwp;
- } else {
+ else
base = gic_data.dist_base;
- rwp_wait = gic_dist_wait_for_rwp;
- }
offset = convert_offset_index(d, GICD_ICFGR, &index);
- ret = gic_configure_irq(index, type, base + offset, rwp_wait);
+ ret = gic_configure_irq(index, type, base + offset, NULL);
if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
/* Misconfigured PPIs are usually not fatal */
pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
@@ -640,82 +640,101 @@ static void gic_deactivate_unhandled(u32 irqnr)
if (irqnr < 8192)
gic_write_dir(irqnr);
} else {
- gic_write_eoir(irqnr);
+ write_gicreg(irqnr, ICC_EOIR1_EL1);
+ isb();
}
}
-static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
+/*
+ * Follow a read of the IAR with any HW maintenance that needs to happen prior
+ * to invoking the relevant IRQ handler. We must do two things:
+ *
+ * (1) Ensure instruction ordering between a read of IAR and subsequent
+ * instructions in the IRQ handler using an ISB.
+ *
+ * It is possible for the IAR to report an IRQ which was signalled *after*
+ * the CPU took an IRQ exception as multiple interrupts can race to be
+ * recognized by the GIC, earlier interrupts could be withdrawn, and/or
+ * later interrupts could be prioritized by the GIC.
+ *
+ * For devices which are tightly coupled to the CPU, such as PMUs, a
+ * context synchronization event is necessary to ensure that system
+ * register state is not stale, as these may have been indirectly written
+ * *after* exception entry.
+ *
+ * (2) Deactivate the interrupt when EOI mode 1 is in use.
+ */
+static inline void gic_complete_ack(u32 irqnr)
{
- bool irqs_enabled = interrupts_enabled(regs);
- int err;
-
- if (irqs_enabled)
- nmi_enter();
-
if (static_branch_likely(&supports_deactivate_key))
- gic_write_eoir(irqnr);
- /*
- * Leave the PSR.I bit set to prevent other NMIs to be
- * received while handling this one.
- * PSR.I will be restored when we ERET to the
- * interrupted context.
- */
- err = generic_handle_domain_nmi(gic_data.domain, irqnr);
- if (err)
- gic_deactivate_unhandled(irqnr);
+ write_gicreg(irqnr, ICC_EOIR1_EL1);
- if (irqs_enabled)
- nmi_exit();
+ isb();
}
-static u32 do_read_iar(struct pt_regs *regs)
+static bool gic_rpr_is_nmi_prio(void)
{
- u32 iar;
+ if (!gic_supports_nmi())
+ return false;
- if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
- u64 pmr;
+ return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
+}
- /*
- * We were in a context with IRQs disabled. However, the
- * entry code has set PMR to a value that allows any
- * interrupt to be acknowledged, and not just NMIs. This can
- * lead to surprising effects if the NMI has been retired in
- * the meantime, and that there is an IRQ pending. The IRQ
- * would then be taken in NMI context, something that nobody
- * wants to debug twice.
- *
- * Until we sort this, drop PMR again to a level that will
- * actually only allow NMIs before reading IAR, and then
- * restore it to what it was.
- */
- pmr = gic_read_pmr();
- gic_pmr_mask_irqs();
- isb();
+static bool gic_irqnr_is_special(u32 irqnr)
+{
+ return irqnr >= 1020 && irqnr <= 1023;
+}
+
+static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
+{
+ if (gic_irqnr_is_special(irqnr))
+ return;
- iar = gic_read_iar();
+ gic_complete_ack(irqnr);
- gic_write_pmr(pmr);
- } else {
- iar = gic_read_iar();
+ if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
+ WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
+ gic_deactivate_unhandled(irqnr);
}
+}
- return iar;
+static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
+{
+ if (gic_irqnr_is_special(irqnr))
+ return;
+
+ gic_complete_ack(irqnr);
+
+ if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
+ WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
+ gic_deactivate_unhandled(irqnr);
+ }
}
-static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+/*
+ * An exception has been taken from a context with IRQs enabled, and this could
+ * be an IRQ or an NMI.
+ *
+ * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
+ * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
+ * after handling any NMI but before handling any IRQ.
+ *
+ * The entry code has performed IRQ entry, and if an NMI is detected we must
+ * perform NMI entry/exit around invoking the handler.
+ */
+static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
{
+ bool is_nmi;
u32 irqnr;
- irqnr = do_read_iar(regs);
+ irqnr = gic_read_iar();
- /* Check for special IDs first */
- if ((irqnr >= 1020 && irqnr <= 1023))
- return;
+ is_nmi = gic_rpr_is_nmi_prio();
- if (gic_supports_nmi() &&
- unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI))) {
- gic_handle_nmi(irqnr, regs);
- return;
+ if (is_nmi) {
+ nmi_enter();
+ __gic_handle_nmi(irqnr, regs);
+ nmi_exit();
}
if (gic_prio_masking_enabled()) {
@@ -723,15 +742,52 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
gic_arch_enable_irqs();
}
- if (static_branch_likely(&supports_deactivate_key))
- gic_write_eoir(irqnr);
- else
- isb();
+ if (!is_nmi)
+ __gic_handle_irq(irqnr, regs);
+}
- if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
- WARN_ONCE(true, "Unexpected interrupt received!\n");
- gic_deactivate_unhandled(irqnr);
- }
+/*
+ * An exception has been taken from a context with IRQs disabled, which can only
+ * be an NMI.
+ *
+ * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
+ * DAIF.IF (and ICC_PMR_EL1) unchanged.
+ *
+ * The entry code has performed NMI entry.
+ */
+static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
+{
+ u64 pmr;
+ u32 irqnr;
+
+ /*
+ * We were in a context with IRQs disabled. However, the
+ * entry code has set PMR to a value that allows any
+ * interrupt to be acknowledged, and not just NMIs. This can
+ * lead to surprising effects if the NMI has been retired in
+ * the meantime while an IRQ is pending. The IRQ
+ * would then be taken in NMI context, something that nobody
+ * wants to debug twice.
+ *
+ * Until we sort this, drop PMR again to a level that will
+ * actually only allow NMIs before reading IAR, and then
+ * restore it to what it was.
+ */
+ pmr = gic_read_pmr();
+ gic_pmr_mask_irqs();
+ isb();
+ irqnr = gic_read_iar();
+ gic_write_pmr(pmr);
+
+ __gic_handle_nmi(irqnr, regs);
+}
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+ if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
+ __gic_handle_irq_from_irqsoff(regs);
+ else
+ __gic_handle_irq_from_irqson(regs);
}
static u32 gic_get_pribits(void)
@@ -807,8 +863,8 @@ static void __init gic_dist_init(void)
for (i = 0; i < GIC_ESPI_NR; i += 4)
writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
- /* Now do the common stuff, and wait for the distributor to drain */
- gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);
+ /* Now do the common stuff */
+ gic_dist_config(base, GIC_LINE_NR, NULL);
val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
@@ -816,8 +872,9 @@ static void __init gic_dist_init(void)
val |= GICD_CTLR_nASSGIreq;
}
- /* Enable distributor with ARE, Group1 */
+ /* Enable distributor with ARE, Group1, and wait for it to drain */
writel_relaxed(val, base + GICD_CTLR);
+ gic_dist_wait_for_rwp();
/*
* Set all global interrupts to the boot CPU only. ARE must be
@@ -919,6 +976,7 @@ static int __gic_update_rdist_properties(struct redist_region *region,
void __iomem *ptr)
{
u64 typer = gic_read_typer(ptr + GICR_TYPER);
+ u32 ctlr = readl_relaxed(ptr + GICR_CTLR);
/* Boot-time cleanup */
if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
@@ -938,9 +996,18 @@ static int __gic_update_rdist_properties(struct redist_region *region,
gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
- /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
+ /*
+ * TYPER.RVPEID implies some form of DirectLPI, no matter what the
+ * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
+ * that the ITS driver can make use of for LPIs (and not VLPIs).
+ *
+ * These are 3 different ways to express the same thing, depending
+ * on the revision of the architecture and its relaxations over
+ * time. Just group them under the 'direct_lpi' banner.
+ */
gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
+ !!(ctlr & GICR_CTLR_IR) |
gic_data.rdists.has_rvpeid);
gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
@@ -962,7 +1029,11 @@ static void gic_update_rdist_properties(void)
gic_iterate_rdists(__gic_update_rdist_properties);
if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
gic_data.ppi_nr = 0;
- pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
+ pr_info("GICv3 features: %d PPIs%s%s\n",
+ gic_data.ppi_nr,
+ gic_data.has_rss ? ", RSS" : "",
+ gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");
+
if (gic_data.rdists.has_vlpis)
pr_info("GICv4 features: %s%s%s\n",
gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
@@ -1284,8 +1355,6 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
*/
if (enabled)
gic_unmask_irq(d);
- else
- gic_dist_wait_for_rwp();
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -1466,6 +1535,12 @@ static int gic_irq_domain_translate(struct irq_domain *d,
if(fwspec->param_count != 2)
return -EINVAL;
+ if (fwspec->param[0] < 16) {
+ pr_err(FW_BUG "Illegal GSI%d translation request\n",
+ fwspec->param[0]);
+ return -EINVAL;
+ }
+
*hwirq = fwspec->param[0];
*type = fwspec->param[1];
@@ -1797,8 +1872,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
- pr_info("Distributor has %sRange Selector support\n",
- gic_data.has_rss ? "" : "no ");
if (typer & GICD_TYPER_MBIS) {
err = mbi_init(handle, gic_data.domain);
@@ -1974,10 +2047,10 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
u32 nr_redist_regions;
int err, i;
- dist_base = of_iomap(node, 0);
- if (!dist_base) {
+ dist_base = of_io_request_and_map(node, 0, "GICD");
+ if (IS_ERR(dist_base)) {
pr_err("%pOF: unable to map gic dist registers\n", node);
- return -ENXIO;
+ return PTR_ERR(dist_base);
}
err = gic_validate_dist_version(dist_base);
@@ -2001,8 +2074,8 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
int ret;
ret = of_address_to_resource(node, 1 + i, &res);
- rdist_regs[i].redist_base = of_iomap(node, 1 + i);
- if (ret || !rdist_regs[i].redist_base) {
+ rdist_regs[i].redist_base = of_io_request_and_map(node, 1 + i, "GICR");
+ if (ret || IS_ERR(rdist_regs[i].redist_base)) {
pr_err("%pOF: couldn't map region %d\n", node, i);
err = -ENODEV;
goto out_unmap_rdist;
@@ -2028,7 +2101,7 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
out_unmap_rdist:
for (i = 0; i < nr_redist_regions; i++)
- if (rdist_regs[i].redist_base)
+ if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
iounmap(rdist_regs[i].redist_base);
kfree(rdist_regs);
out_unmap_dist:
@@ -2075,6 +2148,7 @@ gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
return -ENOMEM;
}
+ request_mem_region(redist->base_address, redist->length, "GICR");
gic_acpi_register_redist(redist->base_address, redist_base);
return 0;
@@ -2097,6 +2171,7 @@ gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
redist_base = ioremap(gicc->gicr_base_address, size);
if (!redist_base)
return -ENOMEM;
+ request_mem_region(gicc->gicr_base_address, size, "GICR");
gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
return 0;
@@ -2298,6 +2373,7 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
pr_err("Unable to map GICD registers\n");
return -ENOMEM;
}
+ request_mem_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");
err = gic_validate_dist_version(acpi_data.dist_base);
if (err) {
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 58ba835bee1f..820404cb56bc 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1115,7 +1115,8 @@ static int gic_irq_domain_translate(struct irq_domain *d,
*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
/* Make it clear that broken DTs are... broken */
- WARN_ON(*type == IRQ_TYPE_NONE);
+ WARN(*type == IRQ_TYPE_NONE,
+ "HW irq %ld has invalid type\n", *hwirq);
return 0;
}
@@ -1123,10 +1124,17 @@ static int gic_irq_domain_translate(struct irq_domain *d,
if(fwspec->param_count != 2)
return -EINVAL;
+ if (fwspec->param[0] < 16) {
+ pr_err(FW_BUG "Illegal GSI%d translation request\n",
+ fwspec->param[0]);
+ return -EINVAL;
+ }
+
*hwirq = fwspec->param[0];
*type = fwspec->param[1];
- WARN_ON(*type == IRQ_TYPE_NONE);
+ WARN(*type == IRQ_TYPE_NONE,
+ "HW irq %ld has invalid type\n", *hwirq);
return 0;
}
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index 8d91a02593fc..96230a04ec23 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#define CTRL_STRIDE_OFF(_t, _r) (_t * 4 * _r)
@@ -70,7 +71,7 @@ static void imx_irqsteer_irq_mask(struct irq_data *d)
raw_spin_unlock_irqrestore(&data->lock, flags);
}
-static struct irq_chip imx_irqsteer_irq_chip = {
+static const struct irq_chip imx_irqsteer_irq_chip = {
.name = "irqsteer",
.irq_mask = imx_irqsteer_irq_mask,
.irq_unmask = imx_irqsteer_irq_unmask,
@@ -175,7 +176,7 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
data->irq_count = DIV_ROUND_UP(irqs_num, 64);
data->reg_num = irqs_num / 32;
- if (IS_ENABLED(CONFIG_PM_SLEEP)) {
+ if (IS_ENABLED(CONFIG_PM)) {
data->saved_reg = devm_kzalloc(&pdev->dev,
sizeof(u32) * data->reg_num,
GFP_KERNEL);
@@ -199,6 +200,7 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto out;
}
+ irq_domain_set_pm_device(data->domain, &pdev->dev);
if (!data->irq_count || data->irq_count > CHAN_MAX_OUTPUT_INT) {
ret = -EINVAL;
@@ -219,6 +221,9 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
return 0;
out:
clk_disable_unprepare(data->ipg_clk);
@@ -241,7 +246,7 @@ static int imx_irqsteer_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
static void imx_irqsteer_save_regs(struct irqsteer_data *data)
{
int i;
@@ -288,7 +293,10 @@ static int imx_irqsteer_resume(struct device *dev)
#endif
static const struct dev_pm_ops imx_irqsteer_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_irqsteer_suspend, imx_irqsteer_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(imx_irqsteer_suspend,
+ imx_irqsteer_resume, NULL)
};
static const struct of_device_id imx_irqsteer_dt_ids[] = {
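The irqsteer conversion moves register save/restore from system-sleep callbacks to runtime PM and reuses pm_runtime_force_suspend()/pm_runtime_force_resume() for system sleep, so one callback pair serves both paths (probe additionally calls pm_runtime_set_active() and pm_runtime_enable(), as the hunk above shows). A hedged sketch of the wiring; the foo_ callbacks are hypothetical stand-ins for the register save/restore plus clock gating:

```c
#include <linux/pm_runtime.h>

/* Hypothetical callbacks standing in for the driver's register
 * save/restore plus clock gating. */
static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	/* System sleep rides on the runtime-PM callbacks. */
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
```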
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index eea5a753618c..d30614661eea 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -375,7 +375,7 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
raw_spin_lock_init(&priv->lock);
priv->base = devm_platform_ioremap_resource(pdev, 0);
- if (!priv->base)
+ if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
for (i = 0; i < priv->reg_stride; i++) {
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
index abd011fcecf4..c7db617e1a2f 100644
--- a/drivers/irqchip/irq-sni-exiu.c
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -37,11 +37,26 @@ struct exiu_irq_data {
u32 spi_base;
};
-static void exiu_irq_eoi(struct irq_data *d)
+static void exiu_irq_ack(struct irq_data *d)
{
struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
writel(BIT(d->hwirq), data->base + EIREQCLR);
+}
+
+static void exiu_irq_eoi(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+
+ /*
+ * Level-triggered interrupts are latched and must be cleared during
+ * EOI or the interrupt stays jammed on. Of course, if a level-triggered
+ * interrupt is still asserted, the write will not clear the interrupt.
+ */
+ if (irqd_is_level_type(d))
+ writel(BIT(d->hwirq), data->base + EIREQCLR);
+
irq_chip_eoi_parent(d);
}
@@ -91,10 +106,13 @@ static int exiu_irq_set_type(struct irq_data *d, unsigned int type)
writel_relaxed(val, data->base + EILVL);
val = readl_relaxed(data->base + EIEDG);
- if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH) {
val &= ~BIT(d->hwirq);
- else
+ irq_set_handler_locked(d, handle_fasteoi_irq);
+ } else {
val |= BIT(d->hwirq);
+ irq_set_handler_locked(d, handle_fasteoi_ack_irq);
+ }
writel_relaxed(val, data->base + EIEDG);
writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
@@ -104,6 +122,7 @@ static int exiu_irq_set_type(struct irq_data *d, unsigned int type)
static struct irq_chip exiu_irq_chip = {
.name = "EXIU",
+ .irq_ack = exiu_irq_ack,
.irq_eoi = exiu_irq_eoi,
.irq_enable = exiu_irq_enable,
.irq_mask = exiu_irq_mask,
diff --git a/drivers/irqchip/irq-sun6i-r.c b/drivers/irqchip/irq-sun6i-r.c
index 4cd3e533740b..a01e44049415 100644
--- a/drivers/irqchip/irq-sun6i-r.c
+++ b/drivers/irqchip/irq-sun6i-r.c
@@ -249,11 +249,13 @@ static int sun6i_r_intc_domain_alloc(struct irq_domain *domain,
for (i = 0; i < nr_irqs; ++i, ++hwirq, ++virq) {
if (hwirq == nmi_hwirq) {
irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- &sun6i_r_intc_nmi_chip, 0);
+ &sun6i_r_intc_nmi_chip,
+ NULL);
irq_set_handler(virq, handle_fasteoi_ack_irq);
} else {
irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- &sun6i_r_intc_wakeup_chip, 0);
+ &sun6i_r_intc_wakeup_chip,
+ NULL);
}
}
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index 27933338f7b3..8c581c985aa7 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -151,14 +151,25 @@ static struct irq_chip xtensa_mx_irq_chip = {
.irq_set_affinity = xtensa_mx_irq_set_affinity,
};
+static void __init xtensa_mx_init_common(struct irq_domain *root_domain)
+{
+ unsigned int i;
+
+ irq_set_default_host(root_domain);
+ secondary_init_irq();
+
+ /* Initialize default IRQ routing to CPU 0 */
+ for (i = 0; i < XCHAL_NUM_EXTINTERRUPTS; ++i)
+ set_er(1, MIROUT(i));
+}
+
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
- irq_set_default_host(root_domain);
- secondary_init_irq();
+ xtensa_mx_init_common(root_domain);
return 0;
}
@@ -168,8 +179,7 @@ static int __init xtensa_mx_init(struct device_node *np,
struct irq_domain *root_domain =
irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
- irq_set_default_host(root_domain);
- secondary_init_irq();
+ xtensa_mx_init_common(root_domain);
return 0;
}
IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 097577ae3c47..ce13c272c387 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -336,7 +336,7 @@ static int bch_allocator_thread(void *arg)
mutex_unlock(&ca->set->bucket_lock);
blkdev_issue_discard(ca->bdev,
bucket_to_sector(ca->set, bucket),
- ca->sb.bucket_size, GFP_KERNEL, 0);
+ ca->sb.bucket_size, GFP_KERNEL);
mutex_lock(&ca->set->bucket_lock);
}
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 6230dfdd9286..7510d1c983a5 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -107,15 +107,16 @@ void bch_btree_verify(struct btree *b)
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
+ unsigned int nr_segs = bio_segments(bio);
struct bio *check;
struct bio_vec bv, cbv;
struct bvec_iter iter, citer = { 0 };
- check = bio_kmalloc(GFP_NOIO, bio_segments(bio));
+ check = bio_kmalloc(nr_segs, GFP_NOIO);
if (!check)
return;
- bio_set_dev(check, bio->bi_bdev);
- check->bi_opf = REQ_OP_READ;
+ bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs,
+ REQ_OP_READ);
check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
check->bi_iter.bi_size = bio->bi_iter.bi_size;
@@ -146,7 +147,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
bio_free_pages(check);
out_put:
- bio_put(check);
+ bio_uninit(check);
+ kfree(check);
}
#endif
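Both bcache hunks here follow the reworked bio_kmalloc() contract: it now returns a bare allocation that the caller initializes with bio_init() and tears down with bio_uninit() plus kfree(), rather than bio_put(). A hedged sketch of the new lifecycle, assuming kernel context with bdev and nr_vecs already in hand:

```c
/* Sketch of the new bio_kmalloc() lifecycle. */
struct bio *bio = bio_kmalloc(nr_vecs, GFP_NOIO);
if (!bio)
	return;
bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);

/* ... fill pages, submit, wait for completion ... */

bio_uninit(bio);
kfree(bio);
```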
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 7c2ca52ca3e4..df5347ea450b 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -771,12 +771,12 @@ static void journal_write_unlocked(struct closure *cl)
bio_reset(bio, ca->bdev, REQ_OP_WRITE |
REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
- bch_bio_map(bio, w->data);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
+ bch_bio_map(bio, w->data);
trace_bcache_journal_write(bio, w->data->keys);
bio_list_add(&list, bio);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index fdd0194f84dd..9c5dde73da88 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -685,7 +685,7 @@ static void do_bio_hook(struct search *s,
{
struct bio *bio = &s->bio.bio;
- bio_init_clone(bio->bi_bdev, bio, orig_bio, GFP_NOIO);
+ bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO);
/*
* bi_end_io can be set separately somewhere else, e.g. the
* variants in,
@@ -1005,7 +1005,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
bio_get(s->iop.bio);
if (bio_op(bio) == REQ_OP_DISCARD &&
- !blk_queue_discard(bdev_get_queue(dc->bdev)))
+ !bdev_max_discard_sectors(dc->bdev))
goto insert_data;
/* I/O request sent to backing device */
@@ -1115,7 +1115,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
bio->bi_private = ddip;
if ((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(dc->bdev)))
+ !bdev_max_discard_sectors(dc->bdev))
bio->bi_end_io(bio);
else
submit_bio_noacct(bio);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index bf3de149d3c9..2f49e31142f6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -973,7 +973,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);
blk_queue_write_cache(q, true, true);
@@ -2350,7 +2349,7 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
ca->bdev->bd_holder = ca;
ca->sb_disk = sb_disk;
- if (blk_queue_discard(bdev_get_queue(bdev)))
+ if (bdev_max_discard_sectors(bdev))
ca->discard = CACHE_DISCARD(&ca->sb);
ret = cache_alloc(ca);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index d1029d71ff3b..c6f677059214 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -1151,7 +1151,7 @@ STORE(__bch_cache)
if (attr == &sysfs_discard) {
bool v = strtoul_or_return(buf);
- if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+ if (bdev_max_discard_sectors(ca->bdev))
ca->discard = v;
if (v != CACHE_DISCARD(&ca->sb)) {
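This is one instance of a pattern repeated across the md/dm hunks below: with the QUEUE_FLAG_DISCARD flag gone, discard support is inferred from a nonzero max_discard_sectors limit via bdev_max_discard_sectors(). A hedged sketch of the predicate that replaces the old queue-flag test; the foo_ name is hypothetical:

```c
/* Replacement for the old blk_queue_discard(bdev_get_queue(bdev))
 * test: a device supports discard iff the limit is nonzero. */
static bool foo_supports_discard(struct block_device *bdev)
{
	return bdev_max_discard_sectors(bdev) != 0;
}
```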
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index e9cbc70d5a0e..5ffa1dcf84cf 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -611,7 +611,8 @@ static void bio_complete(struct bio *bio)
{
struct dm_buffer *b = bio->bi_private;
blk_status_t status = bio->bi_status;
- bio_put(bio);
+ bio_uninit(bio);
+ kfree(bio);
b->end_io(b, status);
}
@@ -626,16 +627,14 @@ static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
vec_size += 2;
- bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
+ bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
if (!bio) {
dmio:
use_dmio(b, rw, sector, n_sectors, offset);
return;
}
-
+ bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, rw);
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, b->c->bdev);
- bio_set_op_attrs(bio, rw, 0);
bio->bi_end_io = bio_complete;
bio->bi_private = b;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 780a61bc6cc0..28c5de8eca4a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3329,13 +3329,6 @@ static int cache_iterate_devices(struct dm_target *ti,
return r;
}
-static bool origin_dev_supports_discard(struct block_device *origin_bdev)
-{
- struct request_queue *q = bdev_get_queue(origin_bdev);
-
- return blk_queue_discard(q);
-}
-
/*
* If discard_passdown was enabled verify that the origin device
* supports discards. Disable discard_passdown if not.
@@ -3349,7 +3342,7 @@ static void disable_passdown_if_not_supported(struct cache *cache)
if (!cache->features.discard_passdown)
return;
- if (!origin_dev_supports_discard(origin_bdev))
+ if (!bdev_max_discard_sectors(origin_bdev))
reason = "discard unsupported";
else if (origin_limits->max_discard_sectors < cache->sectors_per_block)
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 128316a73d01..811b0a5379d0 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -2016,13 +2016,6 @@ static void clone_resume(struct dm_target *ti)
do_waker(&clone->waker.work);
}
-static bool bdev_supports_discards(struct block_device *bdev)
-{
- struct request_queue *q = bdev_get_queue(bdev);
-
- return (q && blk_queue_discard(q));
-}
-
/*
* If discard_passdown was enabled verify that the destination device supports
* discards. Disable discard_passdown if not.
@@ -2036,7 +2029,7 @@ static void disable_passdown_if_not_supported(struct clone *clone)
if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
return;
- if (!bdev_supports_discards(dest_dev))
+ if (!bdev_max_discard_sectors(dest_dev))
reason = "discard unsupported";
else if (dest_limits->max_discard_sectors < clone->region_size)
reason = "max discard sectors smaller than a region";
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index ad2d5faa2ebb..36ae30b73a6e 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4399,6 +4399,7 @@ try_smaller_buffer:
}
if (ic->internal_hash) {
+ size_t recalc_tags_size;
ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
if (!ic->recalc_wq ) {
ti->error = "Cannot allocate workqueue";
@@ -4412,8 +4413,10 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
- ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
- ic->tag_size, GFP_KERNEL);
+ recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+ if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
+ recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
+ ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
if (!ic->recalc_tags) {
ti->error = "Cannot allocate tags for recalculating";
r = -ENOMEM;
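The resized buffer holds one tag per block in a RECALC_SECTORS window, plus slack for a full digest overhanging the last tag when the hash digest is wider than tag_size (recalculation writes whole digests before truncating). A standalone rendering of the arithmetic; the RECALC_SECTORS value below is illustrative only:

```c
#include <stdio.h>
#include <stddef.h>

/* Illustrative only; the real RECALC_SECTORS is defined in
 * dm-integrity.c and may differ. */
#define RECALC_SECTORS 2048

static size_t recalc_tags_size(unsigned int log2_sectors_per_block,
			       size_t tag_size, size_t digest_size)
{
	size_t size = (RECALC_SECTORS >> log2_sectors_per_block) * tag_size;

	/* Leave room for one full digest overhanging the last tag. */
	if (digest_size > tag_size)
		size += digest_size - tag_size;
	return size;
}

int main(void)
{
	/* 4 KiB blocks (2^3 sectors), 4-byte tags, 32-byte digests. */
	printf("%zu\n", recalc_tags_size(3, 4, 32)); /* 1024 + 28 = 1052 */
	return 0;
}
```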
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 5762366333a2..e4b95eaeec8c 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -311,7 +311,7 @@ static void do_region(int op, int op_flags, unsigned region,
* Reject unsupported discard and write same requests.
*/
if (op == REQ_OP_DISCARD)
- special_cmd_max_sectors = q->limits.max_discard_sectors;
+ special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
else if (op == REQ_OP_WRITE_ZEROES)
special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index c9d036d6bb2e..e194226c89e5 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -866,9 +866,8 @@ static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv,
static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct log_writes_c *lc = ti->private;
- struct request_queue *q = bdev_get_queue(lc->dev->bdev);
- if (!q || !blk_queue_discard(q)) {
+ if (!bdev_max_discard_sectors(lc->dev->bdev)) {
lc->device_supports_discard = false;
limits->discard_granularity = lc->sectorsize;
limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c
index 875bca30a0dd..82f2a06153dc 100644
--- a/drivers/md/dm-ps-historical-service-time.c
+++ b/drivers/md/dm-ps-historical-service-time.c
@@ -27,7 +27,6 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/sched/clock.h>
#define DM_MSG_PREFIX "multipath historical-service-time"
@@ -433,7 +432,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps,
{
struct selector *s = ps->context;
struct path_info *pi = NULL, *best = NULL;
- u64 time_now = sched_clock();
+ u64 time_now = ktime_get_ns();
struct dm_path *ret = NULL;
unsigned long flags;
@@ -474,7 +473,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path,
static u64 path_service_time(struct path_info *pi, u64 start_time)
{
- u64 sched_now = ktime_get_ns();
+ u64 now = ktime_get_ns();
/* if a previous disk request has finished after this IO was
* sent to the hardware, pretend the submission happened
@@ -483,11 +482,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time)
if (time_after64(pi->last_finish, start_time))
start_time = pi->last_finish;
- pi->last_finish = sched_now;
- if (time_before64(sched_now, start_time))
+ pi->last_finish = now;
+ if (time_before64(now, start_time))
return 0;
- return sched_now - start_time;
+ return now - start_time;
}
static int hst_end_io(struct path_selector *ps, struct dm_path *path,
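The selector now reads ktime_get_ns() so both timestamps in the service-time calculation come from the same monotonic clock; the clamping logic itself is unchanged. A standalone mirror of that logic, with plain comparisons standing in for the kernel's wrap-safe time_after64()/time_before64():

```c
#include <assert.h>
#include <stdint.h>

/* Mirror of path_service_time()'s clamping, with the clock value
 * passed in so it can be tested deterministically. */
static uint64_t service_time(uint64_t *last_finish, uint64_t start,
			     uint64_t now)
{
	/* If a previous request finished after this one started, treat
	 * submission as having happened at that finish time. */
	if (*last_finish > start)
		start = *last_finish;

	*last_finish = now;
	return now > start ? now - start : 0;
}

int main(void)
{
	uint64_t last = 0;

	assert(service_time(&last, 100, 250) == 150);
	/* Overlapping I/O: counted only from the previous finish. */
	assert(service_time(&last, 200, 400) == 150);
	return 0;
}
```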
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2b26435a6946..9526ccbedafb 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2963,13 +2963,8 @@ static void configure_discard_support(struct raid_set *rs)
raid456 = rs_is_raid456(rs);
for (i = 0; i < rs->raid_disks; i++) {
- struct request_queue *q;
-
- if (!rs->dev[i].rdev.bdev)
- continue;
-
- q = bdev_get_queue(rs->dev[i].rdev.bdev);
- if (!q || !blk_queue_discard(q))
+ if (!rs->dev[i].rdev.bdev ||
+ !bdev_max_discard_sectors(rs->dev[i].rdev.bdev))
return;
if (raid456) {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 03541cfc2317..e7d42f6335a2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1820,9 +1820,7 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !blk_queue_nonrot(q);
+ return !bdev_nonrot(dev->bdev);
}
static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
@@ -1890,9 +1888,7 @@ static bool dm_table_supports_nowait(struct dm_table *t)
static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !blk_queue_discard(q);
+ return !bdev_max_discard_sectors(dev->bdev);
}
static bool dm_table_supports_discards(struct dm_table *t)
@@ -1924,9 +1920,7 @@ static int device_not_secure_erase_capable(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !blk_queue_secure_erase(q);
+ return !bdev_max_secure_erase_sectors(dev->bdev);
}
static bool dm_table_supports_secure_erase(struct dm_table *t)
@@ -1952,9 +1946,7 @@ static int device_requires_stable_pages(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return blk_queue_stable_writes(q);
+ return bdev_stable_writes(dev->bdev);
}
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
@@ -1974,18 +1966,15 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
if (!dm_table_supports_discards(t)) {
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
- /* Must also clear discard limits... */
q->limits.max_discard_sectors = 0;
q->limits.max_hw_discard_sectors = 0;
q->limits.discard_granularity = 0;
q->limits.discard_alignment = 0;
q->limits.discard_misaligned = 0;
- } else
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+ }
- if (dm_table_supports_secure_erase(t))
- blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
+ if (!dm_table_supports_secure_erase(t))
+ q->limits.max_secure_erase_sectors = 0;
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
wc = true;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 4d25d0e27031..84c083f76673 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -398,8 +398,8 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da
sector_t s = block_to_sectors(tc->pool, data_b);
sector_t len = block_to_sectors(tc->pool, data_e - data_b);
- return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
- GFP_NOWAIT, 0, &op->bio);
+ return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOWAIT,
+ &op->bio);
}
static void end_discard(struct discard_op *op, int r)
@@ -2802,13 +2802,6 @@ static void requeue_bios(struct pool *pool)
/*----------------------------------------------------------------
* Binding of control targets to a pool object
*--------------------------------------------------------------*/
-static bool data_dev_supports_discard(struct pool_c *pt)
-{
- struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-
- return blk_queue_discard(q);
-}
-
static bool is_factor(sector_t block_size, uint32_t n)
{
return !sector_div(block_size, n);
@@ -2828,7 +2821,7 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
if (!pt->adjusted_pf.discard_passdown)
return;
- if (!data_dev_supports_discard(pt))
+ if (!bdev_max_discard_sectors(pt->data_dev->bdev))
reason = "discard unsupported";
else if (data_limits->max_discard_sectors < pool->sectors_per_block)
@@ -4057,8 +4050,6 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
/*
* Must explicitly disallow stacking discard limits otherwise the
* block layer will stack them if pool's data device has support.
- * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
- * user to see that, so make sure to set all discard limits to 0.
*/
limits->discard_granularity = 0;
return;
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index c1ca9be4b79e..57daa86c19cf 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -360,16 +360,20 @@ static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
return 0;
}
+struct orig_bio_details {
+ unsigned int op;
+ unsigned int nr_sectors;
+};
+
/*
* First phase of BIO mapping for targets with zone append emulation:
 * check all BIOs that change a zone write pointer and change zone
* append operations into regular write operations.
*/
static bool dm_zone_map_bio_begin(struct mapped_device *md,
- struct bio *orig_bio, struct bio *clone)
+ unsigned int zno, struct bio *clone)
{
sector_t zsectors = blk_queue_zone_sectors(md->queue);
- unsigned int zno = bio_zone_no(orig_bio);
unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
/*
@@ -384,7 +388,7 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
WRITE_ONCE(md->zwp_offset[zno], zwp_offset);
}
- switch (bio_op(orig_bio)) {
+ switch (bio_op(clone)) {
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_FINISH:
return true;
@@ -401,9 +405,8 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
* target zone.
*/
clone->bi_opf = REQ_OP_WRITE | REQ_NOMERGE |
- (orig_bio->bi_opf & (~REQ_OP_MASK));
- clone->bi_iter.bi_sector =
- orig_bio->bi_iter.bi_sector + zwp_offset;
+ (clone->bi_opf & (~REQ_OP_MASK));
+ clone->bi_iter.bi_sector += zwp_offset;
break;
default:
DMWARN_LIMIT("Invalid BIO operation");
@@ -423,11 +426,10 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
* data written to a zone. Note that at this point, the remapped clone BIO
* may already have completed, so we do not touch it.
*/
-static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
- struct bio *orig_bio,
+static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno,
+ struct orig_bio_details *orig_bio_details,
unsigned int nr_sectors)
{
- unsigned int zno = bio_zone_no(orig_bio);
unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
/* The clone BIO may already have been completed and failed */
@@ -435,7 +437,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
return BLK_STS_IOERR;
/* Update the zone wp offset */
- switch (bio_op(orig_bio)) {
+ switch (orig_bio_details->op) {
case REQ_OP_ZONE_RESET:
WRITE_ONCE(md->zwp_offset[zno], 0);
return BLK_STS_OK;
@@ -452,7 +454,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
* Check that the target did not truncate the write operation
* emulating a zone append.
*/
- if (nr_sectors != bio_sectors(orig_bio)) {
+ if (nr_sectors != orig_bio_details->nr_sectors) {
DMWARN_LIMIT("Truncated write for zone append");
return BLK_STS_IOERR;
}
@@ -488,7 +490,7 @@ static inline void dm_zone_unlock(struct request_queue *q,
bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED);
}
-static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
+static bool dm_need_zone_wp_tracking(struct bio *bio)
{
/*
* Special processing is not needed for operations that do not need the
@@ -496,15 +498,15 @@ static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
* zones and all operations that do not modify directly a sequential
* zone write pointer.
*/
- if (op_is_flush(orig_bio->bi_opf) && !bio_sectors(orig_bio))
+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return false;
- switch (bio_op(orig_bio)) {
+ switch (bio_op(bio)) {
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE:
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_FINISH:
case REQ_OP_ZONE_APPEND:
- return bio_zone_is_seq(orig_bio);
+ return bio_zone_is_seq(bio);
default:
return false;
}
@@ -519,8 +521,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
struct dm_target *ti = tio->ti;
struct mapped_device *md = io->md;
struct request_queue *q = md->queue;
- struct bio *orig_bio = io->orig_bio;
struct bio *clone = &tio->clone;
+ struct orig_bio_details orig_bio_details;
unsigned int zno;
blk_status_t sts;
int r;
@@ -529,18 +531,21 @@ int dm_zone_map_bio(struct dm_target_io *tio)
* IOs that do not change a zone write pointer do not need
* any additional special processing.
*/
- if (!dm_need_zone_wp_tracking(orig_bio))
+ if (!dm_need_zone_wp_tracking(clone))
return ti->type->map(ti, clone);
/* Lock the target zone */
- zno = bio_zone_no(orig_bio);
+ zno = bio_zone_no(clone);
dm_zone_lock(q, zno, clone);
+ orig_bio_details.nr_sectors = bio_sectors(clone);
+ orig_bio_details.op = bio_op(clone);
+
/*
* Check that the bio and the target zone write pointer offset are
* both valid, and if the bio is a zone append, remap it to a write.
*/
- if (!dm_zone_map_bio_begin(md, orig_bio, clone)) {
+ if (!dm_zone_map_bio_begin(md, zno, clone)) {
dm_zone_unlock(q, zno, clone);
return DM_MAPIO_KILL;
}
@@ -560,7 +565,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
* The target submitted the clone BIO. The target zone will
* be unlocked on completion of the clone.
*/
- sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr);
+ sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
+ *tio->len_ptr);
break;
case DM_MAPIO_REMAPPED:
/*
@@ -568,7 +574,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
* unlock the target zone here as the clone will not be
* submitted.
*/
- sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr);
+ sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
+ *tio->len_ptr);
if (sts != BLK_STS_OK)
dm_zone_unlock(q, zno, clone);
break;
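Note: the orig_bio_details refactoring exists because ti->type->map() may complete or re-target the clone before dm_zone_map_bio_end() runs, so the op and size must be captured first. A minimal sketch of the snapshot idiom (snapshot_bio is a hypothetical helper; the hunk open-codes it):

#include <linux/bio.h>

struct orig_bio_details {
	unsigned int op;
	unsigned int nr_sectors;
};

/* record everything needed for post-map checks before the bio is handed
 * to the target; only the snapshot is consulted afterwards */
static void snapshot_bio(struct bio *clone, struct orig_bio_details *od)
{
	od->op = bio_op(clone);
	od->nr_sectors = bio_sectors(clone);
}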
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index cac295cc8840..0ec5d8b9b1a4 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1001,7 +1001,7 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_min(limits, DMZ_BLOCK_SIZE);
blk_limits_io_opt(limits, DMZ_BLOCK_SIZE);
- limits->discard_alignment = DMZ_BLOCK_SIZE;
+ limits->discard_alignment = 0;
limits->discard_granularity = DMZ_BLOCK_SIZE;
limits->max_discard_sectors = chunk_sectors;
limits->max_hw_discard_sectors = chunk_sectors;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3c5fad7c4ee6..39081338ca61 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -955,7 +955,6 @@ void disable_discard(struct mapped_device *md)
/* device doesn't really support DISCARD, disable it */
limits->max_discard_sectors = 0;
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}
void disable_write_zeroes(struct mapped_device *md)
@@ -982,7 +981,7 @@ static void clone_endio(struct bio *bio)
if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
- !q->limits.max_discard_sectors)
+ !bdev_max_discard_sectors(bio->bi_bdev))
disable_discard(md);
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!q->limits.max_write_zeroes_sectors)
@@ -1323,8 +1322,7 @@ static void __map_bio(struct bio *clone)
}
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
- struct dm_target *ti, unsigned num_bios,
- unsigned *len)
+ struct dm_target *ti, unsigned num_bios)
{
struct bio *bio;
int try;
@@ -1335,7 +1333,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
if (try)
mutex_lock(&ci->io->md->table_devices_lock);
for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
- bio = alloc_tio(ci, ti, bio_nr, len,
+ bio = alloc_tio(ci, ti, bio_nr, NULL,
try ? GFP_NOIO : GFP_NOWAIT);
if (!bio)
break;
@@ -1363,11 +1361,11 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
break;
case 1:
clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
- dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
__map_bio(clone);
break;
default:
- alloc_multiple_bios(&blist, ci, ti, num_bios, len);
+ /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
+ alloc_multiple_bios(&blist, ci, ti, num_bios);
while ((clone = bio_list_pop(&blist))) {
dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
__map_bio(clone);
@@ -1392,6 +1390,7 @@ static void __send_empty_flush(struct clone_info *ci)
ci->bio = &flush_bio;
ci->sector_count = 0;
+ ci->io->tio.clone.bi_iter.bi_size = 0;
while ((ti = dm_table_get_target(ci->map, target_nr++)))
__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
@@ -1407,14 +1406,10 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
len = min_t(sector_t, ci->sector_count,
max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
- /*
- * dm_accept_partial_bio cannot be used with duplicate bios,
- * so update clone_info cursor before __send_duplicate_bios().
- */
+ __send_duplicate_bios(ci, ti, num_bios, &len);
+
ci->sector += len;
ci->sector_count -= len;
-
- __send_duplicate_bios(ci, ti, num_bios, &len);
}
static bool is_abnormal_io(struct bio *bio)
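Note: alloc_multiple_bios() above keeps its two-pass shape: a lockless GFP_NOWAIT attempt first, then a GFP_NOIO retry under table_devices_lock. A generic sketch of that idiom with hypothetical names (the real function recycles the partially filled bio list between passes):

#include <linux/slab.h>
#include <linux/mutex.h>

static int alloc_all(void **objs, int n, size_t size, struct mutex *lock)
{
	int pass, i;

	for (pass = 0; pass < 2; pass++) {
		if (pass)		/* second pass may block, so lock */
			mutex_lock(lock);
		for (i = 0; i < n; i++) {
			objs[i] = kmalloc(size, pass ? GFP_NOIO : GFP_NOWAIT);
			if (!objs[i])
				break;
		}
		if (pass)
			mutex_unlock(lock);
		if (i == n)
			return 0;
		while (i--)		/* drop partial results, retry harder */
			kfree(objs[i]);
	}
	return -ENOMEM;
}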
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index bfd6026d7809..d87f674ab762 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -639,14 +639,6 @@ re_read:
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
write_behind = le32_to_cpu(sb->write_behind);
sectors_reserved = le32_to_cpu(sb->sectors_reserved);
- /* Setup nodes/clustername only if bitmap version is
- * cluster-compatible
- */
- if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
- nodes = le32_to_cpu(sb->nodes);
- strlcpy(bitmap->mddev->bitmap_info.cluster_name,
- sb->cluster_name, 64);
- }
/* verify that the bitmap-specific fields are valid */
if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -668,6 +660,16 @@ re_read:
goto out;
}
+ /*
+ * Setup nodes/clustername only if bitmap version is
+ * cluster-compatible
+ */
+ if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
+ nodes = le32_to_cpu(sb->nodes);
+ strscpy(bitmap->mddev->bitmap_info.cluster_name,
+ sb->cluster_name, 64);
+ }
+
/* keep the array size field of the bitmap superblock up to date */
sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
@@ -695,14 +697,13 @@ re_read:
if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
- strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
err = 0;
out:
kunmap_atomic(sb);
- /* Assigning chunksize is required for "re_read" */
- bitmap->mddev->bitmap_info.chunksize = chunksize;
if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
+ /* Assigning chunksize is required for "re_read" */
+ bitmap->mddev->bitmap_info.chunksize = chunksize;
err = md_setup_cluster(bitmap->mddev, nodes);
if (err) {
pr_warn("%s: Could not setup cluster service (%d)\n",
@@ -713,18 +714,18 @@ out:
goto re_read;
}
-
out_no_sb:
- if (test_bit(BITMAP_STALE, &bitmap->flags))
- bitmap->events_cleared = bitmap->mddev->events;
- bitmap->mddev->bitmap_info.chunksize = chunksize;
- bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
- bitmap->mddev->bitmap_info.max_write_behind = write_behind;
- bitmap->mddev->bitmap_info.nodes = nodes;
- if (bitmap->mddev->bitmap_info.space == 0 ||
- bitmap->mddev->bitmap_info.space > sectors_reserved)
- bitmap->mddev->bitmap_info.space = sectors_reserved;
- if (err) {
+ if (err == 0) {
+ if (test_bit(BITMAP_STALE, &bitmap->flags))
+ bitmap->events_cleared = bitmap->mddev->events;
+ bitmap->mddev->bitmap_info.chunksize = chunksize;
+ bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
+ bitmap->mddev->bitmap_info.max_write_behind = write_behind;
+ bitmap->mddev->bitmap_info.nodes = nodes;
+ if (bitmap->mddev->bitmap_info.space == 0 ||
+ bitmap->mddev->bitmap_info.space > sectors_reserved)
+ bitmap->mddev->bitmap_info.space = sectors_reserved;
+ } else {
md_bitmap_print_sb(bitmap);
if (bitmap->cluster_slot < 0)
md_cluster_stop(bitmap->mddev);
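Note: the strlcpy()->strscpy() conversions in this file change the return contract: strscpy() returns the number of characters copied, or -E2BIG on truncation, and never walks the full source string the way strlcpy() does. Illustrative sketch (copy_name is a hypothetical wrapper):

#include <linux/string.h>
#include <linux/errno.h>

static void copy_name(char *dst, size_t dst_len, const char *src)
{
	ssize_t n = strscpy(dst, src, dst_len);

	if (n == -E2BIG) {
		/* dst holds a truncated, NUL-terminated copy; strlcpy()
		 * would have returned strlen(src), which both hides the
		 * error in a size and reads past the copy window */
	}
}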
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 1c8a06b77c85..37cbcce3cc66 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -201,7 +201,7 @@ static struct dlm_lock_resource *lockres_init(struct mddev *mddev,
pr_err("md-cluster: Unable to allocate resource name for resource %s\n", name);
goto out_err;
}
- strlcpy(res->name, name, namelen + 1);
+ strscpy(res->name, name, namelen + 1);
if (with_lvb) {
res->lksb.sb_lvbptr = kzalloc(LVB_SIZE, GFP_KERNEL);
if (!res->lksb.sb_lvbptr) {
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 0f55b079371b..138a3b25c5c8 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -64,7 +64,6 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
struct linear_conf *conf;
struct md_rdev *rdev;
int i, cnt;
- bool discard_supported = false;
conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL);
if (!conf)
@@ -96,9 +95,6 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
conf->array_sectors += rdev->sectors;
cnt++;
-
- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
- discard_supported = true;
}
if (cnt != raid_disks) {
pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
@@ -106,11 +102,6 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
goto out;
}
- if (!discard_supported)
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
- else
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
-
/*
* Here we calculate the device offsets.
*/
@@ -252,7 +243,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
start_sector + data_offset;
if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) {
+ !bdev_max_discard_sectors(bio->bi_bdev))) {
/* Just ignore it */
bio_endio(bio);
} else {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 309b3af906ad..707e802d0082 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2627,14 +2627,16 @@ static void sync_sbs(struct mddev *mddev, int nospares)
static bool does_sb_need_changing(struct mddev *mddev)
{
- struct md_rdev *rdev;
+ struct md_rdev *rdev = NULL, *iter;
struct mdp_superblock_1 *sb;
int role;
/* Find a good rdev */
- rdev_for_each(rdev, mddev)
- if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
+ rdev_for_each(iter, mddev)
+ if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
+ rdev = iter;
break;
+ }
/* No good device found. */
if (!rdev)
@@ -2645,11 +2647,11 @@ static bool does_sb_need_changing(struct mddev *mddev)
rdev_for_each(rdev, mddev) {
role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
/* Device activated? */
- if (role == 0xffff && rdev->raid_disk >=0 &&
+ if (role == MD_DISK_ROLE_SPARE && rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags))
return true;
/* Device turned faulty? */
- if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
+ if (test_bit(Faulty, &rdev->flags) && (role < MD_DISK_ROLE_MAX))
return true;
}
@@ -2984,10 +2986,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
md_error(rdev->mddev, rdev);
- if (test_bit(Faulty, &rdev->flags))
- err = 0;
- else
+
+ if (test_bit(MD_BROKEN, &rdev->mddev->flags))
err = -EBUSY;
+ else
+ err = 0;
} else if (cmd_match(buf, "remove")) {
if (rdev->mddev->pers) {
clear_bit(Blocked, &rdev->flags);
@@ -4028,7 +4031,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
oldpriv = mddev->private;
mddev->pers = pers;
mddev->private = priv;
- strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
+ strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
mddev->level = mddev->new_level;
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
@@ -4353,10 +4356,9 @@ __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
* like active, but no writes have been seen for a while (100msec).
*
* broken
- * RAID0/LINEAR-only: same as clean, but array is missing a member.
- * It's useful because RAID0/LINEAR mounted-arrays aren't stopped
- * when a member is gone, so this state will at least alert the
- * user that something is wrong.
+ *     Array is failed. It's useful because mounted arrays aren't stopped
+ *     when the array is failed, so this state will at least alert the user
+ *     that something is wrong.
*/
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
write_pending, active_idle, broken, bad_word};
@@ -5763,7 +5765,7 @@ static int add_named_array(const char *val, const struct kernel_param *kp)
len--;
if (len >= DISK_NAME_LEN)
return -E2BIG;
- strlcpy(buf, val, len+1);
+ strscpy(buf, val, len+1);
if (strncmp(buf, "md_", 3) == 0)
return md_alloc(0, buf);
if (strncmp(buf, "md", 2) == 0 &&
@@ -5896,7 +5898,7 @@ int md_run(struct mddev *mddev)
mddev->level = pers->level;
mddev->new_level = pers->level;
}
- strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
+ strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
if (mddev->reshape_position != MaxSector &&
pers->start_reshape == NULL) {
@@ -5991,8 +5993,7 @@ int md_run(struct mddev *mddev)
bool nonrot = true;
rdev_for_each(rdev, mddev) {
- if (rdev->raid_disk >= 0 &&
- !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
+ if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) {
nonrot = false;
break;
}
@@ -7444,7 +7445,7 @@ static int set_disk_faulty(struct mddev *mddev, dev_t dev)
err = -ENODEV;
else {
md_error(mddev, rdev);
- if (!test_bit(Faulty, &rdev->flags))
+ if (test_bit(MD_BROKEN, &mddev->flags))
err = -EBUSY;
}
rcu_read_unlock();
@@ -7985,13 +7986,16 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
if (!mddev->pers || !mddev->pers->error_handler)
return;
- mddev->pers->error_handler(mddev,rdev);
- if (mddev->degraded)
+ mddev->pers->error_handler(mddev, rdev);
+
+ if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
+ if (!test_bit(MD_BROKEN, &mddev->flags)) {
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
md_new_event();
@@ -8585,7 +8589,7 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
{
struct bio *discard_bio = NULL;
- if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, 0,
+ if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO,
&discard_bio) || !discard_bio)
return;
@@ -9671,7 +9675,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
if (test_bit(Candidate, &rdev2->flags)) {
- if (role == 0xfffe) {
+ if (role == MD_DISK_ROLE_FAULTY) {
pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
md_kick_rdev_from_array(rdev2);
continue;
@@ -9684,7 +9688,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
/*
* got activated except reshape is happening.
*/
- if (rdev2->raid_disk == -1 && role != 0xffff &&
+ if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE &&
!(le32_to_cpu(sb->feature_map) &
MD_FEATURE_RESHAPE_ACTIVE)) {
rdev2->saved_raid_disk = role;
@@ -9701,7 +9705,8 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
* as faulty. The recovery is performed by the
* one who initiated the error.
*/
- if ((role == 0xfffe) || (role == 0xfffd)) {
+ if (role == MD_DISK_ROLE_FAULTY ||
+ role == MD_DISK_ROLE_JOURNAL) {
md_error(mddev, rdev2);
clear_bit(Blocked, &rdev2->flags);
}
@@ -9791,16 +9796,18 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
void md_reload_sb(struct mddev *mddev, int nr)
{
- struct md_rdev *rdev;
+ struct md_rdev *rdev = NULL, *iter;
int err;
/* Find the rdev */
- rdev_for_each_rcu(rdev, mddev) {
- if (rdev->desc_nr == nr)
+ rdev_for_each_rcu(iter, mddev) {
+ if (iter->desc_nr == nr) {
+ rdev = iter;
break;
+ }
}
- if (!rdev || rdev->desc_nr != nr) {
+ if (!rdev) {
pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
return;
}
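Note: the magic role values retired above map one-to-one onto the on-disk constants from include/uapi/linux/raid/md_p.h; from memory (MD_DISK_ROLE_MAX in particular should be double-checked there), they are:

/* special values of sb->dev_roles[] in the v1.x superblock */
#define MD_DISK_ROLE_SPARE	0xffff
#define MD_DISK_ROLE_FAULTY	0xfffe
#define MD_DISK_ROLE_JOURNAL	0xfffd
#define MD_DISK_ROLE_MAX	0xff00	/* highest regular disk role */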
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 6ac283864533..cf2cbb17acbd 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -234,34 +234,42 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
struct md_cluster_info;
-/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */
+/**
+ * enum mddev_flags - md device flags.
+ * @MD_ARRAY_FIRST_USE: First use of array, needs initialization.
+ * @MD_CLOSING: If set, we are closing the array, do not open it then.
+ * @MD_JOURNAL_CLEAN: A raid with journal is already clean.
+ * @MD_HAS_JOURNAL: The raid array has journal feature set.
+ * @MD_CLUSTER_RESYNC_LOCKED: cluster raid only; the node has already taken
+ * the resync lock and needs to release it.
+ * @MD_FAILFAST_SUPPORTED: Using MD_FAILFAST on metadata writes is supported as
+ * calls to md_error() will never cause the array to
+ * become failed.
+ * @MD_HAS_PPL: The raid array has PPL feature set.
+ * @MD_HAS_MULTIPLE_PPLS: The raid array has multiple PPLs feature set.
+ * @MD_ALLOW_SB_UPDATE: md_check_recovery is allowed to update the metadata
+ * without taking reconfig_mutex.
+ * @MD_UPDATING_SB: md_check_recovery is updating the metadata without
+ * explicitly holding reconfig_mutex.
+ * @MD_NOT_READY: do_md_run() is active, so 'array_state' must not report
+ *                that the array is ready yet.
+ * @MD_BROKEN: This is used to stop writes and mark the array as failed.
+ *
+ * change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added
+ */
enum mddev_flags {
- MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */
- MD_CLOSING, /* If set, we are closing the array, do not open
- * it then */
- MD_JOURNAL_CLEAN, /* A raid with journal is already clean */
- MD_HAS_JOURNAL, /* The raid array has journal feature set */
- MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
- * already took resync lock, need to
- * release the lock */
- MD_FAILFAST_SUPPORTED, /* Using MD_FAILFAST on metadata writes is
- * supported as calls to md_error() will
- * never cause the array to become failed.
- */
- MD_HAS_PPL, /* The raid array has PPL feature set */
- MD_HAS_MULTIPLE_PPLS, /* The raid array has multiple PPLs feature set */
- MD_ALLOW_SB_UPDATE, /* md_check_recovery is allowed to update
- * the metadata without taking reconfig_mutex.
- */
- MD_UPDATING_SB, /* md_check_recovery is updating the metadata
- * without explicitly holding reconfig_mutex.
- */
- MD_NOT_READY, /* do_md_run() is active, so 'array_state'
- * must not report that array is ready yet
- */
- MD_BROKEN, /* This is used in RAID-0/LINEAR only, to stop
- * I/O in case an array member is gone/failed.
- */
+ MD_ARRAY_FIRST_USE,
+ MD_CLOSING,
+ MD_JOURNAL_CLEAN,
+ MD_HAS_JOURNAL,
+ MD_CLUSTER_RESYNC_LOCKED,
+ MD_FAILFAST_SUPPORTED,
+ MD_HAS_PPL,
+ MD_HAS_MULTIPLE_PPLS,
+ MD_ALLOW_SB_UPDATE,
+ MD_UPDATING_SB,
+ MD_NOT_READY,
+ MD_BROKEN,
};
enum mddev_sb_flags {
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b21e101183f4..e11701e394ca 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -128,21 +128,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
pr_debug("md/raid0:%s: FINAL %d zones\n",
mdname(mddev), conf->nr_strip_zones);
- if (conf->nr_strip_zones == 1) {
- conf->layout = RAID0_ORIG_LAYOUT;
- } else if (mddev->layout == RAID0_ORIG_LAYOUT ||
- mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
- conf->layout = mddev->layout;
- } else if (default_layout == RAID0_ORIG_LAYOUT ||
- default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
- conf->layout = default_layout;
- } else {
- pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
- mdname(mddev));
- pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
- err = -ENOTSUPP;
- goto abort;
- }
/*
* now since we have the hard sector sizes, we can make sure
* chunk size is a multiple of that sector size
@@ -273,6 +258,22 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
(unsigned long long)smallest->sectors);
}
+ if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
+ conf->layout = RAID0_ORIG_LAYOUT;
+ } else if (mddev->layout == RAID0_ORIG_LAYOUT ||
+ mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
+ conf->layout = mddev->layout;
+ } else if (default_layout == RAID0_ORIG_LAYOUT ||
+ default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
+ conf->layout = default_layout;
+ } else {
+ pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
+ mdname(mddev));
+ pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
+ err = -EOPNOTSUPP;
+ goto abort;
+ }
+
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
*private_conf = conf;
@@ -399,7 +400,6 @@ static int raid0_run(struct mddev *mddev)
conf = mddev->private;
if (mddev->queue) {
struct md_rdev *rdev;
- bool discard_supported = false;
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
@@ -412,13 +412,7 @@ static int raid0_run(struct mddev *mddev)
rdev_for_each(rdev, mddev) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
- discard_supported = true;
}
- if (!discard_supported)
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
- else
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
}
/* calculate array device size */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 99d5464a51f8..99d5af1362d7 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -165,9 +165,10 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
* Allocate bios : 1 for reading, n-1 for writing
*/
for (j = pi->raid_disks ; j-- ; ) {
- bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
+ bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
r1_bio->bios[j] = bio;
}
/*
@@ -206,8 +207,10 @@ out_free_pages:
resync_free_pages(&rps[j]);
out_free_bio:
- while (++j < pi->raid_disks)
- bio_put(r1_bio->bios[j]);
+ while (++j < pi->raid_disks) {
+ bio_uninit(r1_bio->bios[j]);
+ kfree(r1_bio->bios[j]);
+ }
kfree(rps);
out_free_r1bio:
@@ -225,7 +228,8 @@ static void r1buf_pool_free(void *__r1_bio, void *data)
for (i = pi->raid_disks; i--; ) {
rp = get_resync_pages(r1bio->bios[i]);
resync_free_pages(rp);
- bio_put(r1bio->bios[i]);
+ bio_uninit(r1bio->bios[i]);
+ kfree(r1bio->bios[i]);
}
/* resync pages array stored in the 1st bio's .bi_private */
@@ -704,7 +708,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
/* At least two disks to choose from so failfast is OK */
set_bit(R1BIO_FailFast, &r1_bio->state);
- nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
+ nonrot = bdev_nonrot(rdev->bdev);
has_nonrot_disk |= nonrot;
pending = atomic_read(&rdev->nr_pending);
dist = abs(this_sector - conf->mirrors[disk].head_position);
@@ -802,7 +806,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
+ !bdev_max_discard_sectors(bio->bi_bdev)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1637,30 +1641,39 @@ static void raid1_status(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, "]");
}
+/**
+ * raid1_error() - RAID1 error handler.
+ * @mddev: affected md device.
+ * @rdev: member device to fail.
+ *
+ * The routine acknowledges &rdev failure and determines the new @mddev state.
+ * If the array failed, then:
+ * - the &MD_BROKEN flag is set in &mddev->flags.
+ * - recovery is disabled.
+ * Otherwise, the array must be degraded:
+ * - recovery is interrupted.
+ * - &mddev->degraded is bumped.
+ *
+ * @rdev is marked as &Faulty except when the array has failed and
+ * &mddev->fail_last_dev is off.
+ */
static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
struct r1conf *conf = mddev->private;
unsigned long flags;
- /*
- * If it is not operational, then we have already marked it as dead
- * else if it is the last working disks with "fail_last_dev == false",
- * ignore the error, let the next level up know.
- * else mark the drive as failed
- */
spin_lock_irqsave(&conf->device_lock, flags);
- if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
- && (conf->raid_disks - mddev->degraded) == 1) {
- /*
- * Don't fail the drive, act as though we were just a
- * normal single drive.
- * However don't try a recovery from this drive as
- * it is very likely to fail.
- */
- conf->recovery_disabled = mddev->recovery_disabled;
- spin_unlock_irqrestore(&conf->device_lock, flags);
- return;
+
+ if (test_bit(In_sync, &rdev->flags) &&
+ (conf->raid_disks - mddev->degraded) == 1) {
+ set_bit(MD_BROKEN, &mddev->flags);
+
+ if (!mddev->fail_last_dev) {
+ conf->recovery_disabled = mddev->recovery_disabled;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ return;
+ }
}
set_bit(Blocked, &rdev->flags);
if (test_and_clear_bit(In_sync, &rdev->flags))
@@ -1826,8 +1839,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
break;
}
}
- if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
print_conf(conf);
return err;
}
@@ -3106,7 +3117,6 @@ static int raid1_run(struct mddev *mddev)
int i;
struct md_rdev *rdev;
int ret;
- bool discard_supported = false;
if (mddev->level != 1) {
pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
@@ -3141,8 +3151,6 @@ static int raid1_run(struct mddev *mddev)
continue;
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
- discard_supported = true;
}
mddev->degraded = 0;
@@ -3179,15 +3187,6 @@ static int raid1_run(struct mddev *mddev)
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
- if (mddev->queue) {
- if (discard_supported)
- blk_queue_flag_set(QUEUE_FLAG_DISCARD,
- mddev->queue);
- else
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
- mddev->queue);
- }
-
ret = md_integrity_register(mddev);
if (ret) {
md_unregister_thread(&mddev->thread);
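Note: the resync-buffer changes follow the reworked bio_kmalloc(), which now takes (nr_vecs, gfp) and returns a bare allocation: the caller must bio_init() it and release it with bio_uninit() plus kfree(), never bio_put(). Minimal lifecycle sketch under that API (helper names hypothetical):

#include <linux/bio.h>
#include <linux/slab.h>

static struct bio *alloc_inline_bio(unsigned short nr_vecs)
{
	struct bio *bio = bio_kmalloc(nr_vecs, GFP_KERNEL);

	if (!bio)
		return NULL;
	/* bio_kmalloc() only allocates; wire up the inline bvec table */
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, 0);
	return bio;
}

static void free_inline_bio(struct bio *bio)
{
	bio_uninit(bio);
	kfree(bio);	/* bio_put() would assume a bio_alloc_bioset() bio */
}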
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index dfe7d62d3fbd..dfa576cdf11c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -145,15 +145,17 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
* Allocate bios.
*/
for (j = nalloc ; j-- ; ) {
- bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
+ bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
r10_bio->devs[j].bio = bio;
if (!conf->have_replacement)
continue;
- bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
+ bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
r10_bio->devs[j].repl_bio = bio;
}
/*
@@ -197,9 +199,11 @@ out_free_pages:
out_free_bio:
for ( ; j < nalloc; j++) {
if (r10_bio->devs[j].bio)
- bio_put(r10_bio->devs[j].bio);
+ bio_uninit(r10_bio->devs[j].bio);
+ kfree(r10_bio->devs[j].bio);
if (r10_bio->devs[j].repl_bio)
- bio_put(r10_bio->devs[j].repl_bio);
+ bio_uninit(r10_bio->devs[j].repl_bio);
+ kfree(r10_bio->devs[j].repl_bio);
}
kfree(rps);
out_free_r10bio:
@@ -220,12 +224,15 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
if (bio) {
rp = get_resync_pages(bio);
resync_free_pages(rp);
- bio_put(bio);
+ bio_uninit(bio);
+ kfree(bio);
}
bio = r10bio->devs[j].repl_bio;
- if (bio)
- bio_put(bio);
+ if (bio) {
+ bio_uninit(bio);
+ kfree(bio);
+ }
}
/* resync pages array stored in the 1st bio's .bi_private */
@@ -796,7 +803,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
if (!do_balance)
break;
- nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
+ nonrot = bdev_nonrot(rdev->bdev);
has_nonrot_disk |= nonrot;
pending = atomic_read(&rdev->nr_pending);
if (min_pending > pending && nonrot) {
@@ -888,7 +895,7 @@ static void flush_pending_writes(struct r10conf *conf)
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
+ !bdev_max_discard_sectors(bio->bi_bdev)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1083,7 +1090,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
+ !bdev_max_discard_sectors(bio->bi_bdev)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1963,32 +1970,40 @@ static int enough(struct r10conf *conf, int ignore)
_enough(conf, 1, ignore);
}
+/**
+ * raid10_error() - RAID10 error handler.
+ * @mddev: affected md device.
+ * @rdev: member device to fail.
+ *
+ * The routine acknowledges &rdev failure and determines the new @mddev state.
+ * If the array failed, then:
+ * - the &MD_BROKEN flag is set in &mddev->flags.
+ * Otherwise, the array must be degraded:
+ * - recovery is interrupted.
+ * - &mddev->degraded is bumped.
+ *
+ * @rdev is marked as &Faulty except when the array has failed and
+ * &mddev->fail_last_dev is off.
+ */
static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
struct r10conf *conf = mddev->private;
unsigned long flags;
- /*
- * If it is not operational, then we have already marked it as dead
- * else if it is the last working disks with "fail_last_dev == false",
- * ignore the error, let the next level up know.
- * else mark the drive as failed
- */
spin_lock_irqsave(&conf->device_lock, flags);
- if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
- && !enough(conf, rdev->raid_disk)) {
- /*
- * Don't fail the drive, just return an IO error.
- */
- spin_unlock_irqrestore(&conf->device_lock, flags);
- return;
+
+ if (test_bit(In_sync, &rdev->flags) && !enough(conf, rdev->raid_disk)) {
+ set_bit(MD_BROKEN, &mddev->flags);
+
+ if (!mddev->fail_last_dev) {
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ return;
+ }
}
if (test_and_clear_bit(In_sync, &rdev->flags))
mddev->degraded++;
- /*
- * If recovery is running, make sure it aborts.
- */
+
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
@@ -2144,8 +2159,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
rcu_assign_pointer(p->rdev, rdev);
break;
}
- if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
print_conf(conf);
return err;
@@ -4069,7 +4082,6 @@ static int raid10_run(struct mddev *mddev)
sector_t size;
sector_t min_offset_diff = 0;
int first = 1;
- bool discard_supported = false;
if (mddev_init_writes_pending(mddev) < 0)
return -ENOMEM;
@@ -4140,20 +4152,9 @@ static int raid10_run(struct mddev *mddev)
rdev->data_offset << 9);
disk->head_position = 0;
-
- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
- discard_supported = true;
first = 0;
}
- if (mddev->queue) {
- if (discard_supported)
- blk_queue_flag_set(QUEUE_FLAG_DISCARD,
- mddev->queue);
- else
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
- mddev->queue);
- }
/* need to check that every block has at least one working mirror */
if (!enough(conf, -1)) {
pr_err("md/raid10:%s: not enough operational mirrors.\n",
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index a7d50ff9020a..094a4042589e 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1318,7 +1318,7 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
r5l_write_super(log, end);
- if (!blk_queue_discard(bdev_get_queue(bdev)))
+ if (!bdev_max_discard_sectors(bdev))
return;
mddev = log->rdev->mddev;
@@ -1344,14 +1344,14 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
if (log->last_checkpoint < end) {
blkdev_issue_discard(bdev,
log->last_checkpoint + log->rdev->data_offset,
- end - log->last_checkpoint, GFP_NOIO, 0);
+ end - log->last_checkpoint, GFP_NOIO);
} else {
blkdev_issue_discard(bdev,
log->last_checkpoint + log->rdev->data_offset,
log->device_size - log->last_checkpoint,
- GFP_NOIO, 0);
+ GFP_NOIO);
blkdev_issue_discard(bdev, log->rdev->data_offset, end,
- GFP_NOIO, 0);
+ GFP_NOIO);
}
}
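Note: the calls above drop the flags argument that blkdev_issue_discard() no longer takes (secure erase moved to its own helper). Sketch of a call under the new four-argument signature (trim_region and the 2048-sector length are illustrative):

#include <linux/blkdev.h>

static int trim_region(struct block_device *bdev, sector_t start)
{
	/* discard 1 MiB (2048 x 512-byte sectors) starting at 'start' */
	return blkdev_issue_discard(bdev, start, 2048, GFP_NOIO);
}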
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index d3962d92df18..55d065a87b89 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -883,7 +883,9 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
(unsigned long long)r_sector, dd_idx,
(unsigned long long)sector);
- rdev = conf->disks[dd_idx].rdev;
+ /* Array has not started so rcu dereference is safe */
+ rdev = rcu_dereference_protected(
+ conf->disks[dd_idx].rdev, 1);
if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
sector >= rdev->recovery_offset)) {
pr_debug("%s:%*s data member disk %d missing\n",
@@ -934,7 +936,10 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
parity_sector = raid5_compute_sector(conf, r_sector_first + i,
0, &disk, &sh);
BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
- parity_rdev = conf->disks[sh.pd_idx].rdev;
+
+ /* Array has not started so rcu dereference is safe */
+ parity_rdev = rcu_dereference_protected(
+ conf->disks[sh.pd_idx].rdev, 1);
BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
@@ -1404,7 +1409,9 @@ int ppl_init_log(struct r5conf *conf)
for (i = 0; i < ppl_conf->count; i++) {
struct ppl_log *log = &ppl_conf->child_logs[i];
- struct md_rdev *rdev = conf->disks[i].rdev;
+ /* Array has not started so rcu dereference is safe */
+ struct md_rdev *rdev =
+ rcu_dereference_protected(conf->disks[i].rdev, 1);
mutex_init(&log->io_mutex);
spin_lock_init(&log->io_list_lock);
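Note: rcu_dereference_protected(ptr, 1) in these hunks reads an __rcu pointer with a constant-true protection condition: it documents that no concurrent updater can exist yet (the array has not started) and silences the checker without the cost of rcu_read_lock(). Compact sketch with a hypothetical holder struct:

#include <linux/rcupdate.h>

struct md_rdev;

struct rdev_holder {
	struct md_rdev __rcu *rdev;
};

static struct md_rdev *rdev_setup_deref(struct rdev_holder *h)
{
	/* single-threaded setup phase: the "1" asserts the pointer cannot
	 * change underneath us, so no RCU read-side section is needed */
	return rcu_dereference_protected(h->rdev, 1);
}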
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 351d341a1ffa..39038fa8b1c8 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -79,18 +79,21 @@ static inline int stripe_hash_locks_hash(struct r5conf *conf, sector_t sect)
}
static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
+ __acquires(&conf->device_lock)
{
spin_lock_irq(conf->hash_locks + hash);
spin_lock(&conf->device_lock);
}
static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
+ __releases(&conf->device_lock)
{
spin_unlock(&conf->device_lock);
spin_unlock_irq(conf->hash_locks + hash);
}
static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
+ __acquires(&conf->device_lock)
{
int i;
spin_lock_irq(conf->hash_locks);
@@ -100,6 +103,7 @@ static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
}
static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
+ __releases(&conf->device_lock)
{
int i;
spin_unlock(&conf->device_lock);
@@ -164,6 +168,7 @@ static bool stripe_is_lowprio(struct stripe_head *sh)
}
static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
+ __must_hold(&sh->raid_conf->device_lock)
{
struct r5conf *conf = sh->raid_conf;
struct r5worker_group *group;
@@ -211,6 +216,7 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
struct list_head *temp_inactive_list)
+ __must_hold(&conf->device_lock)
{
int i;
int injournal = 0; /* number of date pages with R5_InJournal */
@@ -296,6 +302,7 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
struct list_head *temp_inactive_list)
+ __must_hold(&conf->device_lock)
{
if (atomic_dec_and_test(&sh->count))
do_release_stripe(conf, sh, temp_inactive_list);
@@ -350,9 +357,9 @@ static void release_inactive_stripe_list(struct r5conf *conf,
}
}
-/* should hold conf->device_lock already */
static int release_stripe_list(struct r5conf *conf,
struct list_head *temp_inactive_list)
+ __must_hold(&conf->device_lock)
{
struct stripe_head *sh, *t;
int count = 0;
@@ -629,6 +636,10 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
* This is because some failed devices may only affect one
* of the two sections, and some non-in_sync devices may
* be insync in the section most affected by failed devices.
+ *
+ * Most calls to this function hold &conf->device_lock. Calls
+ * in raid5_run() do not require the lock as no other threads
+ * have been started yet.
*/
int raid5_calc_degraded(struct r5conf *conf)
{
@@ -686,17 +697,17 @@ int raid5_calc_degraded(struct r5conf *conf)
return degraded;
}
-static int has_failed(struct r5conf *conf)
+static bool has_failed(struct r5conf *conf)
{
- int degraded;
+ int degraded = conf->mddev->degraded;
- if (conf->mddev->reshape_position == MaxSector)
- return conf->mddev->degraded > conf->max_degraded;
+ if (test_bit(MD_BROKEN, &conf->mddev->flags))
+ return true;
- degraded = raid5_calc_degraded(conf);
- if (degraded > conf->max_degraded)
- return 1;
- return 0;
+ if (conf->mddev->reshape_position != MaxSector)
+ degraded = raid5_calc_degraded(conf);
+
+ return degraded > conf->max_degraded;
}
struct stripe_head *
@@ -2648,6 +2659,28 @@ static void shrink_stripes(struct r5conf *conf)
conf->slab_cache = NULL;
}
+/*
+ * This helper wraps rcu_dereference_protected() and can be used when
+ * it is known that the nr_pending of the rdev is elevated.
+ */
+static struct md_rdev *rdev_pend_deref(struct md_rdev __rcu *rdev)
+{
+ return rcu_dereference_protected(rdev,
+ atomic_read(&rcu_access_pointer(rdev)->nr_pending));
+}
+
+/*
+ * This helper wraps rcu_dereference_protected() and should be used
+ * when it is known that the mddev_lock() is held. This is safe
+ * seeing raid5_remove_disk() has the same lock held.
+ */
+static struct md_rdev *rdev_mdlock_deref(struct mddev *mddev,
+ struct md_rdev __rcu *rdev)
+{
+ return rcu_dereference_protected(rdev,
+ lockdep_is_held(&mddev->reconfig_mutex));
+}
+
static void raid5_end_read_request(struct bio * bi)
{
struct stripe_head *sh = bi->bi_private;
@@ -2674,9 +2707,9 @@ static void raid5_end_read_request(struct bio * bi)
* In that case it moved down to 'rdev'.
* rdev is not removed until all requests are finished.
*/
- rdev = conf->disks[i].replacement;
+ rdev = rdev_pend_deref(conf->disks[i].replacement);
if (!rdev)
- rdev = conf->disks[i].rdev;
+ rdev = rdev_pend_deref(conf->disks[i].rdev);
if (use_new_offset(conf, sh))
s = sh->sector + rdev->new_data_offset;
@@ -2790,11 +2823,11 @@ static void raid5_end_write_request(struct bio *bi)
for (i = 0 ; i < disks; i++) {
if (bi == &sh->dev[i].req) {
- rdev = conf->disks[i].rdev;
+ rdev = rdev_pend_deref(conf->disks[i].rdev);
break;
}
if (bi == &sh->dev[i].rreq) {
- rdev = conf->disks[i].replacement;
+ rdev = rdev_pend_deref(conf->disks[i].replacement);
if (rdev)
replacement = 1;
else
@@ -2802,7 +2835,7 @@ static void raid5_end_write_request(struct bio *bi)
* replaced it. rdev is not removed
* until all requests are finished.
*/
- rdev = conf->disks[i].rdev;
+ rdev = rdev_pend_deref(conf->disks[i].rdev);
break;
}
}
@@ -2863,34 +2896,31 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
unsigned long flags;
pr_debug("raid456: error called\n");
+ pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
+ mdname(mddev), bdevname(rdev->bdev, b));
+
spin_lock_irqsave(&conf->device_lock, flags);
+ set_bit(Faulty, &rdev->flags);
+ clear_bit(In_sync, &rdev->flags);
+ mddev->degraded = raid5_calc_degraded(conf);
- if (test_bit(In_sync, &rdev->flags) &&
- mddev->degraded == conf->max_degraded) {
- /*
- * Don't allow to achieve failed state
- * Don't try to recover this device
- */
+ if (has_failed(conf)) {
+ set_bit(MD_BROKEN, &conf->mddev->flags);
conf->recovery_disabled = mddev->recovery_disabled;
- spin_unlock_irqrestore(&conf->device_lock, flags);
- return;
+
+ pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n",
+ mdname(mddev), mddev->degraded, conf->raid_disks);
+ } else {
+ pr_crit("md/raid:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
- set_bit(Faulty, &rdev->flags);
- clear_bit(In_sync, &rdev->flags);
- mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(Blocked, &rdev->flags);
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
- pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
- "md/raid:%s: Operation continuing on %d devices.\n",
- mdname(mddev),
- bdevname(rdev->bdev, b),
- mdname(mddev),
- conf->raid_disks - mddev->degraded);
r5c_update_on_rdev_error(mddev, rdev);
}
@@ -5213,23 +5243,23 @@ finish:
struct r5dev *dev = &sh->dev[i];
if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
/* We own a safe reference to the rdev */
- rdev = conf->disks[i].rdev;
+ rdev = rdev_pend_deref(conf->disks[i].rdev);
if (!rdev_set_badblocks(rdev, sh->sector,
RAID5_STRIPE_SECTORS(conf), 0))
md_error(conf->mddev, rdev);
rdev_dec_pending(rdev, conf->mddev);
}
if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
- rdev = conf->disks[i].rdev;
+ rdev = rdev_pend_deref(conf->disks[i].rdev);
rdev_clear_badblocks(rdev, sh->sector,
RAID5_STRIPE_SECTORS(conf), 0);
rdev_dec_pending(rdev, conf->mddev);
}
if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
- rdev = conf->disks[i].replacement;
+ rdev = rdev_pend_deref(conf->disks[i].replacement);
if (!rdev)
/* rdev have been moved down */
- rdev = conf->disks[i].rdev;
+ rdev = rdev_pend_deref(conf->disks[i].rdev);
rdev_clear_badblocks(rdev, sh->sector,
RAID5_STRIPE_SECTORS(conf), 0);
rdev_dec_pending(rdev, conf->mddev);
@@ -5256,6 +5286,7 @@ finish:
}
static void raid5_activate_delayed(struct r5conf *conf)
+ __must_hold(&conf->device_lock)
{
if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
while (!list_empty(&conf->delayed_list)) {
@@ -5273,9 +5304,9 @@ static void raid5_activate_delayed(struct r5conf *conf)
}
static void activate_bit_delay(struct r5conf *conf,
- struct list_head *temp_inactive_list)
+ struct list_head *temp_inactive_list)
+ __must_hold(&conf->device_lock)
{
- /* device_lock is held */
struct list_head head;
list_add(&head, &conf->bitmap_list);
list_del_init(&conf->bitmap_list);
@@ -5500,6 +5531,7 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
* handle_list.
*/
static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
+ __must_hold(&conf->device_lock)
{
struct stripe_head *sh, *tmp;
struct list_head *handle_list = NULL;
@@ -6288,7 +6320,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
*/
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
+ struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev == NULL || test_bit(Faulty, &rdev->flags))
still_degraded = 1;
@@ -6371,8 +6403,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
static int handle_active_stripes(struct r5conf *conf, int group,
struct r5worker *worker,
struct list_head *temp_inactive_list)
- __releases(&conf->device_lock)
- __acquires(&conf->device_lock)
+ __must_hold(&conf->device_lock)
{
struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
int i, batch_size = 0, hash;
@@ -7166,7 +7197,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
int i;
int group_cnt;
struct r5worker_group *new_group;
- int ret;
+ int ret = -ENOMEM;
if (mddev->new_level != 5
&& mddev->new_level != 4
@@ -7225,6 +7256,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
spin_lock_init(&conf->device_lock);
seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock);
mutex_init(&conf->cache_size_mutex);
+
init_waitqueue_head(&conf->wait_for_quiescent);
init_waitqueue_head(&conf->wait_for_stripe);
init_waitqueue_head(&conf->wait_for_overlap);
@@ -7242,7 +7274,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
rdev_for_each(rdev, mddev) {
if (test_bit(Journal, &rdev->flags))
continue;
- if (blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
+ if (bdev_nonrot(rdev->bdev)) {
conf->batch_bio_dispatch = false;
break;
}
@@ -7302,11 +7334,13 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->level = mddev->new_level;
conf->chunk_sectors = mddev->new_chunk_sectors;
- if (raid5_alloc_percpu(conf) != 0)
+ ret = raid5_alloc_percpu(conf);
+ if (ret)
goto abort;
pr_debug("raid456: run(%s) called.\n", mdname(mddev));
+ ret = -EIO;
rdev_for_each(rdev, mddev) {
raid_disk = rdev->raid_disk;
if (raid_disk >= max_disks
@@ -7317,11 +7351,11 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (test_bit(Replacement, &rdev->flags)) {
if (disk->replacement)
goto abort;
- disk->replacement = rdev;
+ RCU_INIT_POINTER(disk->replacement, rdev);
} else {
if (disk->rdev)
goto abort;
- disk->rdev = rdev;
+ RCU_INIT_POINTER(disk->rdev, rdev);
}
if (test_bit(In_sync, &rdev->flags)) {
@@ -7370,6 +7404,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (grow_stripes(conf, conf->min_nr_stripes)) {
pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n",
mdname(mddev), memory);
+ ret = -ENOMEM;
goto abort;
} else
pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory);
@@ -7383,7 +7418,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->shrinker.count_objects = raid5_cache_count;
conf->shrinker.batch = 128;
conf->shrinker.flags = 0;
- if (register_shrinker(&conf->shrinker)) {
+ ret = register_shrinker(&conf->shrinker);
+ if (ret) {
pr_warn("md/raid:%s: couldn't register shrinker.\n",
mdname(mddev));
goto abort;
@@ -7394,17 +7430,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (!conf->thread) {
pr_warn("md/raid:%s: couldn't allocate thread.\n",
mdname(mddev));
+ ret = -ENOMEM;
goto abort;
}
return conf;
abort:
- if (conf) {
+ if (conf)
free_conf(conf);
- return ERR_PTR(-EIO);
- } else
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
@@ -7621,17 +7656,18 @@ static int raid5_run(struct mddev *mddev)
for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
i++) {
- rdev = conf->disks[i].rdev;
+ rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev);
if (!rdev && conf->disks[i].replacement) {
/* The replacement is all we have yet */
- rdev = conf->disks[i].replacement;
+ rdev = rdev_mdlock_deref(mddev,
+ conf->disks[i].replacement);
conf->disks[i].replacement = NULL;
clear_bit(Replacement, &rdev->flags);
- conf->disks[i].rdev = rdev;
+ rcu_assign_pointer(conf->disks[i].rdev, rdev);
}
if (!rdev)
continue;
- if (conf->disks[i].replacement &&
+ if (rcu_access_pointer(conf->disks[i].replacement) &&
conf->reshape_progress != MaxSector) {
/* replacements and reshape simply do not mix. */
pr_warn("md: cannot handle concurrent replacement and reshape.\n");
@@ -7749,7 +7785,6 @@ static int raid5_run(struct mddev *mddev)
*/
stripe = stripe * PAGE_SIZE;
stripe = roundup_pow_of_two(stripe);
- mddev->queue->limits.discard_alignment = stripe;
mddev->queue->limits.discard_granularity = stripe;
blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
@@ -7776,14 +7811,10 @@ static int raid5_run(struct mddev *mddev)
* A better idea might be to turn DISCARD into WRITE_ZEROES
* requests, as that is required to be safe.
*/
- if (devices_handle_discard_safely &&
- mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
- mddev->queue->limits.discard_granularity >= stripe)
- blk_queue_flag_set(QUEUE_FLAG_DISCARD,
- mddev->queue);
- else
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
- mddev->queue);
+ if (!devices_handle_discard_safely ||
+ mddev->queue->limits.max_discard_sectors < (stripe >> 9) ||
+ mddev->queue->limits.discard_granularity < stripe)
+ blk_queue_max_discard_sectors(mddev->queue, 0);
blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
}
@@ -7832,8 +7863,8 @@ static void raid5_status(struct seq_file *seq, struct mddev *mddev)
static void print_raid5_conf (struct r5conf *conf)
{
+ struct md_rdev *rdev;
int i;
- struct disk_info *tmp;
pr_debug("RAID conf printout:\n");
if (!conf) {
@@ -7844,50 +7875,54 @@ static void print_raid5_conf (struct r5conf *conf)
conf->raid_disks,
conf->raid_disks - conf->mddev->degraded);
+ rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
- tmp = conf->disks + i;
- if (tmp->rdev)
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev)
pr_debug(" disk %d, o:%d, dev:%s\n",
- i, !test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev, b));
+ i, !test_bit(Faulty, &rdev->flags),
+ bdevname(rdev->bdev, b));
}
+ rcu_read_unlock();
}
static int raid5_spare_active(struct mddev *mddev)
{
int i;
struct r5conf *conf = mddev->private;
- struct disk_info *tmp;
+ struct md_rdev *rdev, *replacement;
int count = 0;
unsigned long flags;
for (i = 0; i < conf->raid_disks; i++) {
- tmp = conf->disks + i;
- if (tmp->replacement
- && tmp->replacement->recovery_offset == MaxSector
- && !test_bit(Faulty, &tmp->replacement->flags)
- && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
+ rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev);
+ replacement = rdev_mdlock_deref(mddev,
+ conf->disks[i].replacement);
+ if (replacement
+ && replacement->recovery_offset == MaxSector
+ && !test_bit(Faulty, &replacement->flags)
+ && !test_and_set_bit(In_sync, &replacement->flags)) {
/* Replacement has just become active. */
- if (!tmp->rdev
- || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
+ if (!rdev
+ || !test_and_clear_bit(In_sync, &rdev->flags))
count++;
- if (tmp->rdev) {
+ if (rdev) {
/* Replaced device not technically faulty,
* but we need to be sure it gets removed
* and never re-added.
*/
- set_bit(Faulty, &tmp->rdev->flags);
+ set_bit(Faulty, &rdev->flags);
sysfs_notify_dirent_safe(
- tmp->rdev->sysfs_state);
+ rdev->sysfs_state);
}
- sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
- } else if (tmp->rdev
- && tmp->rdev->recovery_offset == MaxSector
- && !test_bit(Faulty, &tmp->rdev->flags)
- && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
+ sysfs_notify_dirent_safe(replacement->sysfs_state);
+ } else if (rdev
+ && rdev->recovery_offset == MaxSector
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_and_set_bit(In_sync, &rdev->flags)) {
count++;
- sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
}
}
spin_lock_irqsave(&conf->device_lock, flags);
@@ -7902,8 +7937,9 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
struct r5conf *conf = mddev->private;
int err = 0;
int number = rdev->raid_disk;
- struct md_rdev **rdevp;
+ struct md_rdev __rcu **rdevp;
struct disk_info *p = conf->disks + number;
+ struct md_rdev *tmp;
print_raid5_conf(conf);
if (test_bit(Journal, &rdev->flags) && conf->log) {
@@ -7921,9 +7957,9 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
log_exit(conf);
return 0;
}
- if (rdev == p->rdev)
+ if (rdev == rcu_access_pointer(p->rdev))
rdevp = &p->rdev;
- else if (rdev == p->replacement)
+ else if (rdev == rcu_access_pointer(p->replacement))
rdevp = &p->replacement;
else
return 0;
@@ -7943,18 +7979,20 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
if (!test_bit(Faulty, &rdev->flags) &&
mddev->recovery_disabled != conf->recovery_disabled &&
!has_failed(conf) &&
- (!p->replacement || p->replacement == rdev) &&
+ (!rcu_access_pointer(p->replacement) ||
+ rcu_access_pointer(p->replacement) == rdev) &&
number < conf->raid_disks) {
err = -EBUSY;
goto abort;
}
*rdevp = NULL;
if (!test_bit(RemoveSynchronized, &rdev->flags)) {
+ lockdep_assert_held(&mddev->reconfig_mutex);
synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */
err = -EBUSY;
- *rdevp = rdev;
+ rcu_assign_pointer(*rdevp, rdev);
}
}
if (!err) {
@@ -7962,17 +8000,19 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
if (err)
goto abort;
}
- if (p->replacement) {
+
+ tmp = rcu_access_pointer(p->replacement);
+ if (tmp) {
/* We must have just cleared 'rdev' */
- p->rdev = p->replacement;
- clear_bit(Replacement, &p->replacement->flags);
+ rcu_assign_pointer(p->rdev, tmp);
+ clear_bit(Replacement, &tmp->flags);
smp_mb(); /* Make sure other CPUs may see both as identical
* but will never see neither - if they are careful
*/
- p->replacement = NULL;
+ rcu_assign_pointer(p->replacement, NULL);
if (!err)
- err = log_modify(conf, p->rdev, true);
+ err = log_modify(conf, tmp, true);
}
clear_bit(WantReplacement, &rdev->flags);
@@ -7988,6 +8028,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
int ret, err = -EEXIST;
int disk;
struct disk_info *p;
+ struct md_rdev *tmp;
int first = 0;
int last = conf->raid_disks - 1;
@@ -8045,7 +8086,8 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
}
for (disk = first; disk <= last; disk++) {
p = conf->disks + disk;
- if (test_bit(WantReplacement, &p->rdev->flags) &&
+ tmp = rdev_mdlock_deref(mddev, p->rdev);
+ if (test_bit(WantReplacement, &tmp->flags) &&
p->replacement == NULL) {
clear_bit(In_sync, &rdev->flags);
set_bit(Replacement, &rdev->flags);
@@ -8336,6 +8378,7 @@ static void end_reshape(struct r5conf *conf)
static void raid5_finish_reshape(struct mddev *mddev)
{
struct r5conf *conf = mddev->private;
+ struct md_rdev *rdev;
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -8347,10 +8390,12 @@ static void raid5_finish_reshape(struct mddev *mddev)
for (d = conf->raid_disks ;
d < conf->raid_disks - mddev->delta_disks;
d++) {
- struct md_rdev *rdev = conf->disks[d].rdev;
+ rdev = rdev_mdlock_deref(mddev,
+ conf->disks[d].rdev);
if (rdev)
clear_bit(In_sync, &rdev->flags);
- rdev = conf->disks[d].replacement;
+ rdev = rdev_mdlock_deref(mddev,
+ conf->disks[d].replacement);
if (rdev)
clear_bit(In_sync, &rdev->flags);
}
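
Note on the raid5 hunks above: they convert the disk_info rdev/replacement pointers to __rcu pointers, so writers publish with rcu_assign_pointer() under the reconfig mutex, readers sample with rcu_dereference() inside rcu_read_lock(), and removal waits in synchronize_rcu() before the object may be reused. A minimal userspace sketch of that publish/read discipline, with C11 release/acquire atomics standing in for the kernel RCU primitives (names below are illustrative, not from the patch):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct rdev { int faulty; };

static _Atomic(struct rdev *) slot;          /* models conf->disks[i].rdev */

static void publish(struct rdev *new_rdev)   /* like rcu_assign_pointer() */
{
        atomic_store_explicit(&slot, new_rdev, memory_order_release);
}

static void print_slot(void)                 /* like the rcu_read_lock() walk */
{
        struct rdev *r = atomic_load_explicit(&slot, memory_order_acquire);

        if (r)
                printf("disk present, faulty=%d\n", r->faulty);
}

int main(void)
{
        struct rdev *r = calloc(1, sizeof(*r));

        if (!r)
                return 1;
        publish(r);
        print_slot();
        publish(NULL);  /* removal; the kernel then runs synchronize_rcu()
                         * before the object may be freed or reused */
        free(r);
        return 0;
}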
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 9e8486a9e445..638d29863503 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -473,7 +473,8 @@ enum {
*/
struct disk_info {
- struct md_rdev *rdev, *replacement;
+ struct md_rdev __rcu *rdev;
+ struct md_rdev __rcu *replacement;
struct page *extra_page; /* extra page to use in prexor */
};
@@ -560,6 +561,16 @@ struct r5pending_data {
struct bio_list bios;
};
+struct raid5_percpu {
+ struct page *spare_page; /* Used when checking P/Q in raid6 */
+ void *scribble; /* space for constructing buffer
+ * lists and performing address
+ * conversions
+ */
+ int scribble_obj_size;
+ local_lock_t lock;
+};
+
struct r5conf {
struct hlist_head *stripe_hashtbl;
/* only protect corresponding hash list and inactive_list */
@@ -635,15 +646,7 @@ struct r5conf {
*/
int recovery_disabled;
/* per cpu variables */
- struct raid5_percpu {
- struct page *spare_page; /* Used when checking P/Q in raid6 */
- void *scribble; /* space for constructing buffer
- * lists and performing address
- * conversions
- */
- int scribble_obj_size;
- local_lock_t lock;
- } __percpu *percpu;
+ struct raid5_percpu __percpu *percpu;
int scribble_disks;
int scribble_sectors;
struct hlist_node node;
diff --git a/drivers/media/platform/nxp/Kconfig b/drivers/media/platform/nxp/Kconfig
index 28f2bafc14d2..5afa373e534f 100644
--- a/drivers/media/platform/nxp/Kconfig
+++ b/drivers/media/platform/nxp/Kconfig
@@ -7,6 +7,7 @@ comment "NXP media platform drivers"
config VIDEO_IMX_MIPI_CSIS
tristate "NXP MIPI CSI-2 CSIS receiver found on i.MX7 and i.MX8 models"
depends on ARCH_MXC || COMPILE_TEST
+ depends on VIDEO_DEV
select MEDIA_CONTROLLER
select V4L2_FWNODE
select VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 4de5e8d2b261..3d3d1062e212 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -892,7 +892,7 @@ static int rga_probe(struct platform_device *pdev)
}
rga->dst_mmu_pages =
(unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
- if (rga->dst_mmu_pages) {
+ if (!rga->dst_mmu_pages) {
ret = -ENOMEM;
goto free_src_pages;
}
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 47029746b89e..0de587b412d4 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -77,16 +77,16 @@ err_mutex_unlock:
}
static const struct si2157_tuner_info si2157_tuners[] = {
- { SI2141, false, 0x60, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
- { SI2141, false, 0x61, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
- { SI2146, false, 0x11, SI2146_11_FIRMWARE, NULL },
- { SI2147, false, 0x50, SI2147_50_FIRMWARE, NULL },
- { SI2148, true, 0x32, SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
- { SI2148, true, 0x33, SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
- { SI2157, false, 0x50, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
- { SI2158, false, 0x50, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
- { SI2158, false, 0x51, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
- { SI2177, false, 0x50, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
+ { SI2141, 0x60, false, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
+ { SI2141, 0x61, false, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
+ { SI2146, 0x11, false, SI2146_11_FIRMWARE, NULL },
+ { SI2147, 0x50, false, SI2147_50_FIRMWARE, NULL },
+ { SI2148, 0x32, true, SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2148, 0x33, true, SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2157, 0x50, false, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
+ { SI2158, 0x50, false, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2158, 0x51, false, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2177, 0x50, false, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
};
static int si2157_load_firmware(struct dvb_frontend *fe,
@@ -178,7 +178,7 @@ static int si2157_find_and_load_firmware(struct dvb_frontend *fe)
}
}
- if (!fw_name && !fw_alt_name) {
+ if (required && !fw_name && !fw_alt_name) {
dev_err(&client->dev,
"unknown chip version Si21%d-%c%c%c ROM 0x%02x\n",
part_id, cmd.args[1], cmd.args[3], cmd.args[4], rom_id);
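
The si2157 hunk above reorders the tuner table columns (part id, then ROM id, then the boolean) and makes an unknown chip fatal only when firmware is required. A small sketch of that table-driven lookup, under the assumption that the flag means "cannot run from ROM alone"; the struct, helper names, and firmware file names here are made up for illustration:

#include <stdbool.h>
#include <stddef.h>

struct tuner_fw {
        int part_id;
        unsigned char rom_id;
        bool required;          /* cannot run from ROM alone (assumed meaning) */
        const char *fw_name;    /* NULL: no external firmware */
};

static const struct tuner_fw tuners[] = {
        { 2141, 0x60, false, "si2141-60.fw" },
        { 2146, 0x11, false, NULL },
        { 2148, 0x32, true,  "si2148-32.fw" },
};

static const struct tuner_fw *find_tuner(int part_id, unsigned char rom_id)
{
        size_t i;

        for (i = 0; i < sizeof(tuners) / sizeof(tuners[0]); i++)
                if (tuners[i].part_id == part_id && tuners[i].rom_id == rom_id)
                        return &tuners[i];
        return NULL;            /* unknown chip version */
}

static int load_firmware(int part_id, unsigned char rom_id)
{
        const struct tuner_fw *t = find_tuner(part_id, rom_id);

        if (!t)
                return -1;                      /* unknown chip */
        if (!t->fw_name)
                return t->required ? -1 : 0;    /* ROM-only part */
        /* ... request and upload t->fw_name here ... */
        return 0;
}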
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index c267283b01fd..e749dcb3ddea 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -544,20 +544,27 @@ static int atmel_ebi_probe(struct platform_device *pdev)
smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0);
ebi->smc.regmap = syscon_node_to_regmap(smc_np);
- if (IS_ERR(ebi->smc.regmap))
- return PTR_ERR(ebi->smc.regmap);
+ if (IS_ERR(ebi->smc.regmap)) {
+ ret = PTR_ERR(ebi->smc.regmap);
+ goto put_node;
+ }
ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np);
- if (IS_ERR(ebi->smc.layout))
- return PTR_ERR(ebi->smc.layout);
+ if (IS_ERR(ebi->smc.layout)) {
+ ret = PTR_ERR(ebi->smc.layout);
+ goto put_node;
+ }
ebi->smc.clk = of_clk_get(smc_np, 0);
if (IS_ERR(ebi->smc.clk)) {
- if (PTR_ERR(ebi->smc.clk) != -ENOENT)
- return PTR_ERR(ebi->smc.clk);
+ if (PTR_ERR(ebi->smc.clk) != -ENOENT) {
+ ret = PTR_ERR(ebi->smc.clk);
+ goto put_node;
+ }
ebi->smc.clk = NULL;
}
+ of_node_put(smc_np);
ret = clk_prepare_enable(ebi->smc.clk);
if (ret)
return ret;
@@ -608,6 +615,10 @@ static int atmel_ebi_probe(struct platform_device *pdev)
}
return of_platform_populate(np, NULL, NULL, dev);
+
+put_node:
+ of_node_put(smc_np);
+ return ret;
}
static __maybe_unused int atmel_ebi_resume(struct device *dev)
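
The atmel-ebi fix above is the classic single-exit cleanup pattern: the node reference taken by of_parse_phandle() must be dropped on every early return, so each failure branches to one put_node label and the success path puts the node before continuing. A toy model of the same shape, with a plain refcount instead of of_node_get()/of_node_put():

#include <stdio.h>

struct node { int refs; };

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n) { n->refs--; }

static int probe(struct node *smc_np, int fail_regmap, int fail_layout)
{
        int ret = 0;

        node_get(smc_np);               /* like of_parse_phandle() */

        if (fail_regmap) {              /* syscon_node_to_regmap() failed */
                ret = -1;
                goto put_node;
        }
        if (fail_layout) {              /* atmel_hsmc_get_reg_layout() failed */
                ret = -1;
                goto put_node;
        }

        node_put(smc_np);               /* success path drops the ref too */
        return 0;

put_node:
        node_put(smc_np);
        return ret;
}

int main(void)
{
        struct node n = { 0 };

        printf("ok=%d refs=%d\n", probe(&n, 0, 0), n.refs);   /* refs back to 0 */
        printf("err=%d refs=%d\n", probe(&n, 1, 0), n.refs);  /* still 0 */
        return 0;
}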
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 2f6939da21cd..e83b61c925a4 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -287,8 +287,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
}
/* legacy dts may still use "simple-bus" compatible */
- ret = of_platform_populate(dev->dev.of_node, NULL, NULL,
- &dev->dev);
+ ret = of_platform_default_populate(dev->dev.of_node, NULL, &dev->dev);
if (ret)
goto err_free_nandirq;
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index e4cc64f56019..019a0822bde0 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -164,25 +164,39 @@ static const struct regmap_access_table rpcif_volatile_table = {
/*
- * Custom accessor functions to ensure SMRDR0 and SMWDR0 are always accessed
- * with proper width. Requires SMENR_SPIDE to be correctly set before!
+ * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with
+ * proper width. Requires rpcif.xfer_size to be correctly set before!
*/
static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
{
struct rpcif *rpc = context;
- if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) {
- u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF);
-
- if (spide == 0x8) {
+ switch (reg) {
+ case RPCIF_SMRDR0:
+ case RPCIF_SMWDR0:
+ switch (rpc->xfer_size) {
+ case 1:
*val = readb(rpc->base + reg);
return 0;
- } else if (spide == 0xC) {
+
+ case 2:
*val = readw(rpc->base + reg);
return 0;
- } else if (spide != 0xF) {
+
+ case 4:
+ case 8:
+ *val = readl(rpc->base + reg);
+ return 0;
+
+ default:
return -EILSEQ;
}
+
+ case RPCIF_SMRDR1:
+ case RPCIF_SMWDR1:
+ if (rpc->xfer_size != 8)
+ return -EILSEQ;
+ break;
}
*val = readl(rpc->base + reg);
@@ -193,18 +207,34 @@ static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val)
{
struct rpcif *rpc = context;
- if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) {
- u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF);
-
- if (spide == 0x8) {
+ switch (reg) {
+ case RPCIF_SMWDR0:
+ switch (rpc->xfer_size) {
+ case 1:
writeb(val, rpc->base + reg);
return 0;
- } else if (spide == 0xC) {
+
+ case 2:
writew(val, rpc->base + reg);
return 0;
- } else if (spide != 0xF) {
+
+ case 4:
+ case 8:
+ writel(val, rpc->base + reg);
+ return 0;
+
+ default:
return -EILSEQ;
}
+
+ case RPCIF_SMWDR1:
+ if (rpc->xfer_size != 8)
+ return -EILSEQ;
+ break;
+
+ case RPCIF_SMRDR0:
+ case RPCIF_SMRDR1:
+ return -EPERM;
}
writel(val, rpc->base + reg);
@@ -469,6 +499,7 @@ int rpcif_manual_xfer(struct rpcif *rpc)
smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes));
regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
+ rpc->xfer_size = nbytes;
memcpy(data, rpc->buffer + pos, nbytes);
if (nbytes == 8) {
@@ -533,6 +564,7 @@ int rpcif_manual_xfer(struct rpcif *rpc)
regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
regmap_write(rpc->regmap, RPCIF_SMCR,
rpc->smcr | RPCIF_SMCR_SPIE);
+ rpc->xfer_size = nbytes;
ret = wait_msg_xfer_end(rpc);
if (ret)
goto err_out;
@@ -651,6 +683,7 @@ static int rpcif_probe(struct platform_device *pdev)
struct platform_device *vdev;
struct device_node *flash;
const char *name;
+ int ret;
flash = of_get_next_child(pdev->dev.of_node, NULL);
if (!flash) {
@@ -674,7 +707,14 @@ static int rpcif_probe(struct platform_device *pdev)
return -ENOMEM;
vdev->dev.parent = &pdev->dev;
platform_set_drvdata(pdev, vdev);
- return platform_device_add(vdev);
+
+ ret = platform_device_add(vdev);
+ if (ret) {
+ platform_device_put(vdev);
+ return ret;
+ }
+
+ return 0;
}
static int rpcif_remove(struct platform_device *pdev)
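
The rpc-if accessors above stop re-deriving the access width from the SPIDE bits and instead dispatch on the transfer size recorded in rpc->xfer_size, returning -EILSEQ for widths the data registers cannot express. A userspace sketch of the same size-dispatched read over a plain buffer (memcpy stands in for readb/readw/readl):

#include <stdint.h>
#include <string.h>

static int reg_read(const void *base, unsigned int off, unsigned int xfer_size,
                    uint32_t *val)
{
        const unsigned char *p = (const unsigned char *)base + off;

        switch (xfer_size) {
        case 1: {                       /* readb() */
                uint8_t v;
                memcpy(&v, p, sizeof(v));
                *val = v;
                return 0;
        }
        case 2: {                       /* readw() */
                uint16_t v;
                memcpy(&v, p, sizeof(v));
                *val = v;
                return 0;
        }
        case 4:
        case 8:                         /* readl(); 8-byte xfers span two regs */
                memcpy(val, p, sizeof(*val));
                return 0;
        default:
                return -1;              /* the driver returns -EILSEQ here */
        }
}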
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index e90adfa57950..9b3ba2df71c7 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -6658,13 +6658,13 @@ static int mpt_summary_proc_show(struct seq_file *m, void *v)
static int mpt_version_proc_show(struct seq_file *m, void *v)
{
u8 cb_idx;
- int scsi, fc, sas, lan, ctl, targ, dmp;
+ int scsi, fc, sas, lan, ctl, targ;
char *drvname;
seq_printf(m, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON);
seq_printf(m, " Fusion MPT base driver\n");
- scsi = fc = sas = lan = ctl = targ = dmp = 0;
+ scsi = fc = sas = lan = ctl = targ = 0;
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
drvname = NULL;
if (MptCallbacks[cb_idx]) {
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 91f96abbb3f9..8d169a35cf13 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -31,6 +31,8 @@
*/
#define FM25_SN_LEN 8 /* serial number length */
+#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */
+
struct at25_data {
struct spi_eeprom chip;
struct spi_device *spi;
@@ -39,6 +41,7 @@ struct at25_data {
struct nvmem_config nvmem_config;
struct nvmem_device *nvmem;
u8 sernum[FM25_SN_LEN];
+ u8 command[EE_MAXADDRLEN + 1];
};
#define AT25_WREN 0x06 /* latch the write enable */
@@ -61,8 +64,6 @@ struct at25_data {
#define FM25_ID_LEN 9 /* ID length */
-#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */
-
/*
* Specs often allow 5ms for a page write, sometimes 20ms;
* it's important to recover from write timeouts.
@@ -78,7 +79,6 @@ static int at25_ee_read(void *priv, unsigned int offset,
{
struct at25_data *at25 = priv;
char *buf = val;
- u8 command[EE_MAXADDRLEN + 1];
u8 *cp;
ssize_t status;
struct spi_transfer t[2];
@@ -92,12 +92,15 @@ static int at25_ee_read(void *priv, unsigned int offset,
if (unlikely(!count))
return -EINVAL;
- cp = command;
+ cp = at25->command;
instr = AT25_READ;
if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
if (offset >= BIT(at25->addrlen * 8))
instr |= AT25_INSTR_BIT3;
+
+ mutex_lock(&at25->lock);
+
*cp++ = instr;
/* 8/16/24-bit address is written MSB first */
@@ -116,7 +119,7 @@ static int at25_ee_read(void *priv, unsigned int offset,
spi_message_init(&m);
memset(t, 0, sizeof(t));
- t[0].tx_buf = command;
+ t[0].tx_buf = at25->command;
t[0].len = at25->addrlen + 1;
spi_message_add_tail(&t[0], &m);
@@ -124,8 +127,6 @@ static int at25_ee_read(void *priv, unsigned int offset,
t[1].len = count;
spi_message_add_tail(&t[1], &m);
- mutex_lock(&at25->lock);
-
/*
* Read it all at once.
*
@@ -152,7 +153,7 @@ static int fm25_aux_read(struct at25_data *at25, u8 *buf, uint8_t command,
spi_message_init(&m);
memset(t, 0, sizeof(t));
- t[0].tx_buf = &command;
+ t[0].tx_buf = at25->command;
t[0].len = 1;
spi_message_add_tail(&t[0], &m);
@@ -162,6 +163,8 @@ static int fm25_aux_read(struct at25_data *at25, u8 *buf, uint8_t command,
mutex_lock(&at25->lock);
+ at25->command[0] = command;
+
status = spi_sync(at25->spi, &m);
dev_dbg(&at25->spi->dev, "read %d aux bytes --> %d\n", len, status);
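
The at25 change above moves the command bytes off the stack into at25_data and builds them only after taking the device mutex: a buffer handed to spi_sync() may be DMA-mapped, so it cannot live on the stack, and once it is shared per-device state it must be filled under the lock that serialises transfers. A pthread model of that ordering (the opcode and field layout are illustrative):

#include <pthread.h>

struct eeprom {
        pthread_mutex_t lock;
        unsigned char command[4];       /* shared, DMA-safe in the driver */
};

static void ee_read(struct eeprom *ee, unsigned int offset)
{
        pthread_mutex_lock(&ee->lock);  /* taken *before* filling the buffer */

        ee->command[0] = 0x03;          /* READ opcode (illustrative) */
        ee->command[1] = offset >> 16;  /* 24-bit address, MSB first */
        ee->command[2] = offset >> 8;
        ee->command[3] = offset;

        /* ... the spi_sync() equivalent runs here, buffer still owned ... */

        pthread_mutex_unlock(&ee->lock);
}

int main(void)
{
        struct eeprom ee;

        pthread_mutex_init(&ee.lock, NULL);
        ee_read(&ee, 0x1234);
        pthread_mutex_destroy(&ee.lock);
        return 0;
}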
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index e008d82e4ba3..a13506dd8119 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -111,10 +111,10 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
if (contiguous) {
if (is_power_of_2(page_size))
- paddr = (u64) (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool,
- total_size, NULL, page_size);
+ paddr = (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool,
+ total_size, NULL, page_size);
else
- paddr = (u64) (uintptr_t) gen_pool_alloc(vm->dram_pg_pool, total_size);
+ paddr = gen_pool_alloc(vm->dram_pg_pool, total_size);
if (!paddr) {
dev_err(hdev->dev,
"failed to allocate %llu contiguous pages with total size of %llu\n",
@@ -150,12 +150,12 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
for (i = 0 ; i < num_pgs ; i++) {
if (is_power_of_2(page_size))
phys_pg_pack->pages[i] =
- (u64) gen_pool_dma_alloc_align(vm->dram_pg_pool,
- page_size, NULL,
- page_size);
+ (uintptr_t)gen_pool_dma_alloc_align(vm->dram_pg_pool,
+ page_size, NULL,
+ page_size);
else
- phys_pg_pack->pages[i] = (u64) gen_pool_alloc(vm->dram_pg_pool,
- page_size);
+ phys_pg_pack->pages[i] = gen_pool_alloc(vm->dram_pg_pool,
+ page_size);
if (!phys_pg_pack->pages[i]) {
dev_err(hdev->dev,
"Failed to allocate device memory (out of memory)\n");
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 4e67c1403cc9..506dc900f5c7 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -993,7 +993,7 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
return -EEXIST;
md->reset_done |= type;
- err = mmc_hw_reset(host);
+ err = mmc_hw_reset(host->card);
/* Ensure we switch back to the correct partition */
if (err) {
struct mmc_blk_data *main_md =
@@ -1880,6 +1880,31 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
}
+static int mmc_spi_err_check(struct mmc_card *card)
+{
+ u32 status = 0;
+ int err;
+
+ /*
+ * SPI does not have a TRAN state we have to wait on; instead, the
+ * card is ready again when it no longer holds the line LOW.
+ * We still have to ensure two things here before we know the write
+ * was successful:
+ * 1. The card has not disconnected during busy and we actually read our
+ * own pull-up, thinking it was still connected, so ensure it
+ * still responds.
+ * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a
+ * just reconnected card after being disconnected during busy.
+ */
+ err = __mmc_send_status(card, &status, 0);
+ if (err)
+ return err;
+ /* All R1 and R2 bits of SPI are errors in our case */
+ if (status)
+ return -EIO;
+ return 0;
+}
+
static int mmc_blk_busy_cb(void *cb_data, bool *busy)
{
struct mmc_blk_busy_data *data = cb_data;
@@ -1903,9 +1928,16 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
struct mmc_blk_busy_data cb_data;
int err;
- if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
+ if (rq_data_dir(req) == READ)
return 0;
+ if (mmc_host_is_spi(card->host)) {
+ err = mmc_spi_err_check(card);
+ if (err)
+ mqrq->brq.data.bytes_xfered = 0;
+ return err;
+ }
+
cb_data.card = card;
cb_data.status = 0;
err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
@@ -2350,6 +2382,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct mmc_blk_data *md;
int devidx, ret;
char cap_str[10];
+ bool cache_enabled = false;
+ bool fua_enabled = false;
devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
if (devidx < 0) {
@@ -2429,13 +2463,17 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->flags |= MMC_BLK_CMD23;
}
- if (mmc_card_mmc(card) &&
- md->flags & MMC_BLK_CMD23 &&
+ if (md->flags & MMC_BLK_CMD23 &&
((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
card->ext_csd.rel_sectors)) {
md->flags |= MMC_BLK_REL_WR;
- blk_queue_write_cache(md->queue.queue, true, true);
+ fua_enabled = true;
+ cache_enabled = true;
}
+ if (mmc_cache_enabled(card->host))
+ cache_enabled = true;
+
+ blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);
string_get_size((u64)size, 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
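
mmc_spi_err_check() above encodes a simple rule: over SPI there is no TRAN state to poll, so after the card releases the line one status read must both prove the card is still attached and show no R1/R2 bit set (an "idle" bit would betray a card that dropped off the bus and re-initialised). A compact sketch of that check, with send_status() standing in for __mmc_send_status():

#include <stdint.h>

/* stands in for __mmc_send_status(); a healthy card answers all-zero */
static int send_status(uint32_t *status)
{
        *status = 0;
        return 0;
}

static int spi_err_check(void)
{
        uint32_t status;
        int err;

        err = send_status(&status);
        if (err)
                return err;     /* no answer at all: card gone */
        if (status)
                return -5;      /* -EIO: every R1/R2 bit is an error here */
        return 0;
}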
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 368f10405e13..c6ae16d40766 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1995,7 +1995,7 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
/**
* mmc_hw_reset - reset the card in hardware
- * @host: MMC host to which the card is attached
+ * @card: card to be reset
*
* Hard reset the card. This function is only for upper layers, like the
* block layer or card drivers. You cannot use it in host drivers (struct
@@ -2003,8 +2003,9 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
*
* Return: 0 on success, -errno on failure
*/
-int mmc_hw_reset(struct mmc_host *host)
+int mmc_hw_reset(struct mmc_card *card)
{
+ struct mmc_host *host = card->host;
int ret;
ret = host->bus_ops->hw_reset(host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index e7ea45386c22..efa95dc4fc4e 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1384,13 +1384,17 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
}
+ /*
+ * Bump to HS timing and frequency. Some cards don't handle
+ * SEND_STATUS reliably at the initial frequency.
+ */
mmc_set_timing(host, MMC_TIMING_MMC_HS);
+ mmc_set_bus_speed(card);
+
err = mmc_switch_status(card, true);
if (err)
goto out_err;
- mmc_set_clock(host, card->ext_csd.hs_max_dtr);
-
/* Switch card to DDR with strobe bit */
val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1448,7 +1452,7 @@ out_err:
static int mmc_select_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- unsigned int old_timing, old_signal_voltage;
+ unsigned int old_timing, old_signal_voltage, old_clock;
int err = -EINVAL;
u8 val;
@@ -1479,8 +1483,17 @@ static int mmc_select_hs200(struct mmc_card *card)
false, true, MMC_CMD_RETRIES);
if (err)
goto err;
+
+ /*
+ * Bump to HS timing and frequency. Some cards don't handle
+ * SEND_STATUS reliably at the initial frequency.
+ * NB: We can't move to full (HS200) speeds until after we've
+ * successfully switched over.
+ */
old_timing = host->ios.timing;
+ old_clock = host->ios.clock;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+ mmc_set_clock(card->host, card->ext_csd.hs_max_dtr);
/*
* For HS200, CRC errors are not a reliable way to know the
@@ -1493,8 +1506,10 @@ static int mmc_select_hs200(struct mmc_card *card)
* mmc_select_timing() assumes timing has not changed if
* it is a switch error.
*/
- if (err == -EBADMSG)
+ if (err == -EBADMSG) {
+ mmc_set_clock(host, old_clock);
mmc_set_timing(host, old_timing);
+ }
}
err:
if (err) {
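
The HS200 hunk above pairs every speed bump with a rollback: it records the old timing and clock, raises both before the status poll, and restores both when the switch reports -EBADMSG so later retune logic sees consistent state. A minimal model of that save/raise/restore sequence (setters and parameters are illustrative):

struct ios { unsigned int timing, clock; };

static void set_timing(struct ios *ios, unsigned int t) { ios->timing = t; }
static void set_clock(struct ios *ios, unsigned int c)  { ios->clock = c; }

static int select_hs200(struct ios *ios, unsigned int hs_timing,
                        unsigned int hs_clock, int switch_failed)
{
        unsigned int old_timing = ios->timing;
        unsigned int old_clock = ios->clock;

        set_timing(ios, hs_timing);     /* bump before the status poll */
        set_clock(ios, hs_clock);

        if (switch_failed) {            /* mirrors the -EBADMSG branch */
                set_clock(ios, old_clock);
                set_timing(ios, old_timing);
                return -1;
        }
        return 0;
}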
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 180d7e9d3400..81c55bfd6e0c 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -21,7 +21,7 @@
#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */
-#define MMC_OP_COND_PERIOD_US (1 * 1000) /* 1ms */
+#define MMC_OP_COND_PERIOD_US (4 * 1000) /* 4ms */
#define MMC_OP_COND_TIMEOUT_MS 1000 /* 1s */
static const u8 tuning_blk_pattern_4bit[] = {
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index e6a2fd2c6d5c..8d9bceeff986 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -2325,10 +2325,9 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
static int mmc_test_reset(struct mmc_test_card *test)
{
struct mmc_card *card = test->card;
- struct mmc_host *host = card->host;
int err;
- err = mmc_hw_reset(host);
+ err = mmc_hw_reset(card);
if (!err) {
/*
* Reset will re-enable the card's command queue, but tests
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index c69b2d9df6f1..a3d446005571 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -183,14 +183,13 @@ static void mmc_queue_setup_discard(struct request_queue *q,
if (!max_discard)
return;
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
blk_queue_max_discard_sectors(q, max_discard);
q->limits.discard_granularity = card->pref_erase << 9;
/* granularity must not be greater than max. discard */
if (card->pref_erase > max_discard)
q->limits.discard_granularity = SECTOR_SIZE;
if (mmc_can_secure_erase_trim(card))
- blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
+ blk_queue_max_secure_erase_sectors(q, max_discard);
}
static unsigned short mmc_get_max_segments(struct mmc_host *host)
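
mmc_queue_setup_discard() above keeps an invariant: the discard granularity follows the card's preferred erase size but must never exceed the maximum discard, falling back to a single sector when it would. The rule reduces to a couple of lines; a sketch, assuming both inputs are in 512-byte sectors:

#define SECTOR_SHIFT 9
#define SECTOR_SIZE  512

/* both arguments in 512-byte sectors; result in bytes */
static unsigned int discard_granularity(unsigned int pref_erase,
                                        unsigned int max_discard)
{
        if (pref_erase > max_discard)
                return SECTOR_SIZE;     /* granularity may not exceed max */
        return pref_erase << SECTOR_SHIFT;
}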
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 9c13f2c31365..4566d7fc9055 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -62,8 +62,8 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
* excepted the last element which has no constraint on idmasize
*/
for_each_sg(data->sg, sg, data->sg_len - 1, i) {
- if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) ||
- !IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
+ !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) {
dev_err(mmc_dev(host->mmc),
"unaligned scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
@@ -71,7 +71,7 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
}
}
- if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
dev_err(mmc_dev(host->mmc),
"unaligned last scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
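
The sdmmc fix above is an iterator bug: the alignment test inside for_each_sg() checked data->sg, the head of the scatterlist, instead of sg, the loop cursor, so every pass re-tested element 0. The corrected shape in plain C, over an array instead of a scatterlist:

#include <stdbool.h>
#include <stddef.h>

struct seg { unsigned int offset, length; };

static bool all_aligned(const struct seg *segs, size_t n, unsigned int align)
{
        size_t i;

        for (i = 0; i < n; i++) {
                /* test segs[i], the cursor - not segs[0], the list head */
                if (segs[i].offset % align || segs[i].length % align)
                        return false;
        }
        return true;
}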
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 2797a9c0f17d..ddb5ca2f559e 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -144,9 +144,9 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
return clk_get_rate(priv->clk);
if (priv->clkh) {
+ /* HS400 with 4TAP needs different clock settings */
bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
- bool need_slow_clkh = (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
- (host->mmc->ios.timing == MMC_TIMING_MMC_HS400);
+ bool need_slow_clkh = host->mmc->ios.timing == MMC_TIMING_MMC_HS400;
clkh_shift = use_4tap && need_slow_clkh ? 1 : 2;
ref_clk = priv->clkh;
}
@@ -396,10 +396,10 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) |
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
- /* Set the sampling clock selection range of HS400 mode */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
- 0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
+ sd_scc_read32(host, priv,
+ SH_MOBILE_SDHI_SCC_DTCNTL));
/* Avoid bad TAP */
if (bad_taps & BIT(priv->tap_set)) {
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 50c71e0ba5e4..ff9f5b63c337 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/reset.h>
#include "sdhci-pltfm.h"
#include "cqhci.h"
@@ -2482,6 +2483,43 @@ static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
}
+static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
+{
+ struct reset_control *reset;
+ int ret = 0;
+
+ reset = reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(dev, PTR_ERR(reset),
+ "unable to acquire core_reset\n");
+
+ if (!reset)
+ return ret;
+
+ ret = reset_control_assert(reset);
+ if (ret) {
+ reset_control_put(reset);
+ return dev_err_probe(dev, ret, "core_reset assert failed\n");
+ }
+
+ /*
+ * The hardware requirement for delay between assert/deassert
+ * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
+ * ~125us (4/32768). To be on the safe side add 200us delay.
+ */
+ usleep_range(200, 210);
+
+ ret = reset_control_deassert(reset);
+ if (ret) {
+ reset_control_put(reset);
+ return dev_err_probe(dev, ret, "core_reset deassert failed\n");
+ }
+
+ usleep_range(200, 210);
+ reset_control_put(reset);
+
+ return ret;
+}
static int sdhci_msm_probe(struct platform_device *pdev)
{
@@ -2529,6 +2567,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
+ ret = sdhci_msm_gcc_reset(&pdev->dev, host);
+ if (ret)
+ goto pltfm_free;
+
/* Setup SDCC bus voter clock. */
msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
if (!IS_ERR(msm_host->bus_clk)) {
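
sdhci_msm_gcc_reset() above follows the usual optional-reset recipe: acquire the (possibly absent) reset line, assert, wait longer than the documented minimum of 3-4 sleep-clock cycles, deassert, wait again, and drop the reference on every path. A userspace model of the sequence, with usleep() standing in for usleep_range() and toy assert/deassert helpers:

#include <unistd.h>

struct reset { int asserted; };

static int reset_assert(struct reset *r)   { r->asserted = 1; return 0; }
static int reset_deassert(struct reset *r) { r->asserted = 0; return 0; }

static int gcc_reset(struct reset *r)
{
        int ret;

        if (!r)                 /* the reset line is optional */
                return 0;

        ret = reset_assert(r);
        if (ret)
                return ret;

        usleep(200);            /* > 4 cycles of the 32.768 kHz sleep clock
                                 * (~122 us), matching the patch's 200 us */

        ret = reset_deassert(r);
        if (ret)
                return ret;

        usleep(200);
        return 0;
}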
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 666cee4c7f7c..08e838400b52 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -241,16 +241,6 @@ static void xenon_voltage_switch(struct sdhci_host *host)
{
/* Wait for 5ms after set 1.8V signal enable bit */
usleep_range(5000, 5500);
-
- /*
- * For some reason the controller's Host Control2 register reports
- * the bit representing 1.8V signaling as 0 when read after it was
- * written as 1. Subsequent read reports 1.
- *
- * Since this may cause some issues, do an empty read of the Host
- * Control2 register here to circumvent this.
- */
- sdhci_readw(host, SDHCI_HOST_CONTROL2);
}
static unsigned int xenon_get_max_clock(struct sdhci_host *host)
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index c62afd212692..46f9e2923d86 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -377,8 +377,9 @@ static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
pdes[i].buf_addr_ptr1 =
cpu_to_le32(sg_dma_address(&data->sg[i]) >>
host->cfg->idma_des_shift);
- pdes[i].buf_addr_ptr2 = cpu_to_le32((u32)next_desc >>
- host->cfg->idma_des_shift);
+ pdes[i].buf_addr_ptr2 =
+ cpu_to_le32(next_desc >>
+ host->cfg->idma_des_shift);
}
pdes[0].config |= cpu_to_le32(SDXC_IDMAC_DES0_FD);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 64d2b093f114..f73172111465 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -377,7 +377,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
if (tr->discard) {
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
blk_queue_max_discard_sectors(new->rq, UINT_MAX);
new->rq->limits.discard_granularity = tr->blksize;
}
diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
index e7df3dac705e..49ab3448b9b1 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.c
+++ b/drivers/mtd/nand/raw/mtk_ecc.c
@@ -43,6 +43,7 @@
struct mtk_ecc_caps {
u32 err_mask;
+ u32 err_shift;
const u8 *ecc_strength;
const u32 *ecc_regs;
u8 num_ecc_strength;
@@ -76,7 +77,7 @@ static const u8 ecc_strength_mt2712[] = {
};
static const u8 ecc_strength_mt7622[] = {
- 4, 6, 8, 10, 12, 14, 16
+ 4, 6, 8, 10, 12
};
enum mtk_ecc_regs {
@@ -221,7 +222,7 @@ void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
for (i = 0; i < sectors; i++) {
offset = (i >> 2) << 2;
err = readl(ecc->regs + ECC_DECENUM0 + offset);
- err = err >> ((i % 4) * 8);
+ err = err >> ((i % 4) * ecc->caps->err_shift);
err &= ecc->caps->err_mask;
if (err == ecc->caps->err_mask) {
/* uncorrectable errors */
@@ -449,6 +450,7 @@ EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
.err_mask = 0x3f,
+ .err_shift = 8,
.ecc_strength = ecc_strength_mt2701,
.ecc_regs = mt2701_ecc_regs,
.num_ecc_strength = 20,
@@ -459,6 +461,7 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
.err_mask = 0x7f,
+ .err_shift = 8,
.ecc_strength = ecc_strength_mt2712,
.ecc_regs = mt2712_ecc_regs,
.num_ecc_strength = 23,
@@ -468,10 +471,11 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
};
static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
- .err_mask = 0x3f,
+ .err_mask = 0x1f,
+ .err_shift = 5,
.ecc_strength = ecc_strength_mt7622,
.ecc_regs = mt7622_ecc_regs,
- .num_ecc_strength = 7,
+ .num_ecc_strength = 5,
.ecc_mode_shift = 4,
.parity_bits = 13,
.pg_irq_sel = 0,
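
The mtk_ecc change above parametrises how four per-sector error counts are packed into one 32-bit register: mt2701/mt2712 use 8-bit fields (masks 0x3f/0x7f) while mt7622 uses 5-bit fields (mask 0x1f), so the decode takes the shift and mask from the per-SoC caps. The extraction itself is one expression; a sketch:

#include <stdint.h>

/* e.g. mt7622: err_shift = 5, err_mask = 0x1f
 *      mt2712: err_shift = 8, err_mask = 0x7f
 */
static unsigned int decode_err_count(uint32_t reg, unsigned int sector,
                                     unsigned int err_shift, uint32_t err_mask)
{
        return (reg >> ((sector % 4) * err_shift)) & err_mask;
}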
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 1a77542c6d67..048b255faa76 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2651,10 +2651,23 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
+ /* Free the initially allocated BAM transaction for reading the ONFI params */
+ if (nandc->props->is_bam)
+ free_bam_transaction(nandc);
nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
cwperpage);
+ /* Now allocate the BAM transaction based on updated max_cwperpage */
+ if (nandc->props->is_bam) {
+ nandc->bam_txn = alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+ "failed to allocate bam transaction\n");
+ return -ENOMEM;
+ }
+ }
+
/*
* DATA_UD_BYTES varies based on whether the read/write command protects
* spare data with ECC too. We protect spare data by default, so we set
@@ -2955,17 +2968,6 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
if (ret)
return ret;
- if (nandc->props->is_bam) {
- free_bam_transaction(nandc);
- nandc->bam_txn = alloc_bam_transaction(nandc);
- if (!nandc->bam_txn) {
- dev_err(nandc->dev,
- "failed to allocate bam transaction\n");
- nand_cleanup(chip);
- return -ENOMEM;
- }
- }
-
ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
if (ret)
nand_cleanup(chip);
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index b85b9c6fcc42..a278829469d6 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -384,7 +384,8 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
dma_addr_t dma_addr;
dma_cookie_t cookie;
uint32_t reg;
- int ret;
+ int ret = 0;
+ unsigned long time_left;
if (dir == DMA_FROM_DEVICE) {
chan = flctl->chan_fifo0_rx;
@@ -425,13 +426,14 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
goto out;
}
- ret =
+ time_left =
wait_for_completion_timeout(&flctl->dma_complete,
msecs_to_jiffies(3000));
- if (ret <= 0) {
+ if (time_left == 0) {
dmaengine_terminate_all(chan);
dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
+ ret = -ETIMEDOUT;
}
out:
@@ -441,7 +443,7 @@ out:
dma_unmap_single(chan->device->dev, dma_addr, len, dir);
- /* ret > 0 is success */
+ /* ret == 0 is success */
return ret;
}
@@ -465,7 +467,7 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
/* initiate DMA transfer */
if (flctl->chan_fifo0_rx && rlen >= 32 &&
- flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
+ !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
goto convert; /* DMA success */
/* do polling transfer */
@@ -524,7 +526,7 @@ static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
/* initiate DMA transfer */
if (flctl->chan_fifo0_tx && rlen >= 32 &&
- flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
+ !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
return; /* DMA success */
/* do polling transfer */
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 15eddca7b4b6..38e152548126 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4027,14 +4027,19 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const v
return true;
}
-static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
+static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
{
hash ^= (__force u32)flow_get_u32_dst(flow) ^
(__force u32)flow_get_u32_src(flow);
hash ^= (hash >> 16);
hash ^= (hash >> 8);
+
/* discard lowest hash bit to deal with the common even ports pattern */
- return hash >> 1;
+ if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
+ xmit_policy == BOND_XMIT_POLICY_ENCAP34)
+ return hash >> 1;
+
+ return hash;
}
/* Generate hash based on xmit policy. If @skb is given it is used to linearize
@@ -4064,7 +4069,7 @@ static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const voi
memcpy(&hash, &flow.ports.ports, sizeof(hash));
}
- return bond_ip_hash(hash, &flow);
+ return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
}
/**
@@ -5259,7 +5264,7 @@ static u32 bond_sk_hash_l34(struct sock *sk)
/* L4 */
memcpy(&hash, &flow.ports.ports, sizeof(hash));
/* L3 */
- return bond_ip_hash(hash, &flow);
+ return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
}
static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
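
bond_ip_hash() above now drops the lowest hash bit only for the layer-3+4 policies, where the common even/odd port pairing would otherwise bias slave selection; pure L2/L3 policies keep the full hash. A standalone version of the fold (a boolean flag replaces the BOND_XMIT_POLICY_* comparison):

#include <stdbool.h>
#include <stdint.h>

static uint32_t ip_hash(uint32_t l4_hash, uint32_t src, uint32_t dst,
                        bool layer34_policy)
{
        uint32_t hash = l4_hash;

        hash ^= dst ^ src;
        hash ^= hash >> 16;
        hash ^= hash >> 8;

        /* only the L3+L4 policies discard the even-ports bit */
        if (layer34_policy)
                return hash >> 1;
        return hash;
}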
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index d0c5a7a60daf..5215bd9b2c80 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -241,13 +241,14 @@ struct grcan_device_config {
.rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \
}
-#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100
+#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 4100
#define GRLIB_VERSION_MASK 0xffff
/* GRCAN private data structure */
struct grcan_priv {
struct can_priv can; /* must be the first member */
struct net_device *dev;
+ struct device *ofdev_dev;
struct napi_struct napi;
struct grcan_registers __iomem *regs; /* ioremap'ed registers */
@@ -921,7 +922,7 @@ static void grcan_free_dma_buffers(struct net_device *dev)
struct grcan_priv *priv = netdev_priv(dev);
struct grcan_dma *dma = &priv->dma;
- dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf,
+ dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf,
dma->base_handle);
memset(dma, 0, sizeof(*dma));
}
@@ -946,7 +947,7 @@ static int grcan_allocate_dma_buffers(struct net_device *dev,
/* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
- dma->base_buf = dma_alloc_coherent(&dev->dev,
+ dma->base_buf = dma_alloc_coherent(priv->ofdev_dev,
dma->base_size,
&dma->base_handle,
GFP_KERNEL);
@@ -1102,8 +1103,10 @@ static int grcan_close(struct net_device *dev)
priv->closing = true;
if (priv->need_txbug_workaround) {
+ spin_unlock_irqrestore(&priv->lock, flags);
del_timer_sync(&priv->hang_timer);
del_timer_sync(&priv->rr_timer);
+ spin_lock_irqsave(&priv->lock, flags);
}
netif_stop_queue(dev);
grcan_stop_hardware(dev);
@@ -1122,7 +1125,7 @@ static int grcan_close(struct net_device *dev)
return 0;
}
-static int grcan_transmit_catch_up(struct net_device *dev, int budget)
+static void grcan_transmit_catch_up(struct net_device *dev)
{
struct grcan_priv *priv = netdev_priv(dev);
unsigned long flags;
@@ -1130,7 +1133,7 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
spin_lock_irqsave(&priv->lock, flags);
- work_done = catch_up_echo_skb(dev, budget, true);
+ work_done = catch_up_echo_skb(dev, -1, true);
if (work_done) {
if (!priv->resetting && !priv->closing &&
!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
@@ -1144,8 +1147,6 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
}
spin_unlock_irqrestore(&priv->lock, flags);
-
- return work_done;
}
static int grcan_receive(struct net_device *dev, int budget)
@@ -1227,19 +1228,13 @@ static int grcan_poll(struct napi_struct *napi, int budget)
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
- int tx_work_done, rx_work_done;
- int rx_budget = budget / 2;
- int tx_budget = budget - rx_budget;
+ int work_done;
- /* Half of the budget for receiving messages */
- rx_work_done = grcan_receive(dev, rx_budget);
+ work_done = grcan_receive(dev, budget);
- /* Half of the budget for transmitting messages as that can trigger echo
- * frames being received
- */
- tx_work_done = grcan_transmit_catch_up(dev, tx_budget);
+ grcan_transmit_catch_up(dev);
- if (rx_work_done < rx_budget && tx_work_done < tx_budget) {
+ if (work_done < budget) {
napi_complete(napi);
/* Guarantee no interference with a running reset that otherwise
@@ -1256,7 +1251,7 @@ static int grcan_poll(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&priv->lock, flags);
}
- return rx_work_done + tx_work_done;
+ return work_done;
}
/* Work around the tx bug by waiting for the risky situation to clear. If that fails,
@@ -1587,6 +1582,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
memcpy(&priv->config, &grcan_module_config,
sizeof(struct grcan_device_config));
priv->dev = dev;
+ priv->ofdev_dev = &ofdev->dev;
priv->regs = base;
priv->can.bittiming_const = &grcan_bittiming_const;
priv->can.do_set_bittiming = grcan_set_bittiming;
@@ -1639,6 +1635,7 @@ exit_free_candev:
static int grcan_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
+ struct device_node *sysid_parent;
u32 sysid, ambafreq;
int irq, err;
void __iomem *base;
@@ -1647,10 +1644,15 @@ static int grcan_probe(struct platform_device *ofdev)
/* Compare GRLIB version number with the first that does not
* have the tx bug (see start_xmit)
*/
- err = of_property_read_u32(np, "systemid", &sysid);
- if (!err && ((sysid & GRLIB_VERSION_MASK)
- >= GRCAN_TXBUG_SAFE_GRLIB_VERSION))
- txbug = false;
+ sysid_parent = of_find_node_by_path("/ambapp0");
+ if (sysid_parent) {
+ of_node_get(sysid_parent);
+ err = of_property_read_u32(sysid_parent, "systemid", &sysid);
+ if (!err && ((sysid & GRLIB_VERSION_MASK) >=
+ GRCAN_TXBUG_SAFE_GRLIB_VERSION))
+ txbug = false;
+ of_node_put(sysid_parent);
+ }
err = of_property_read_u32(np, "freq", &ambafreq);
if (err) {
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index b3b5bc1c803b..088bb1bcf1ef 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1495,34 +1495,22 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
if (err)
return err;
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_30X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_30X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_30X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
if (err)
return err;
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_31X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_31X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
cdev->can.ctrlmode_supported |=
(m_can_niso_supported(cdev) ?
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 2c5d40997168..d18b515e6ccc 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -85,9 +85,6 @@ struct m_can_classdev {
struct sk_buff *tx_skb;
struct phy *transceiver;
- const struct can_bittiming_const *bit_timing;
- const struct can_bittiming_const *data_timing;
-
struct m_can_ops *ops;
int version;
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index b56a54d6c5a9..8f184a852a0a 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -18,14 +18,9 @@
#define M_CAN_PCI_MMIO_BAR 0
+#define M_CAN_CLOCK_FREQ_EHL 200000000
#define CTL_CSR_INT_CTL_OFFSET 0x508
-struct m_can_pci_config {
- const struct can_bittiming_const *bit_timing;
- const struct can_bittiming_const *data_timing;
- unsigned int clock_freq;
-};
-
struct m_can_pci_priv {
struct m_can_classdev cdev;
@@ -89,40 +84,9 @@ static struct m_can_ops m_can_pci_ops = {
.read_fifo = iomap_read_fifo,
};
-static const struct can_bittiming_const m_can_bittiming_const_ehl = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
- .tseg1_max = 64,
- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
- .tseg2_max = 128,
- .sjw_max = 128,
- .brp_min = 1,
- .brp_max = 512,
- .brp_inc = 1,
-};
-
-static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
- .tseg1_max = 16,
- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
-static const struct m_can_pci_config m_can_pci_ehl = {
- .bit_timing = &m_can_bittiming_const_ehl,
- .data_timing = &m_can_data_bittiming_const_ehl,
- .clock_freq = 200000000,
-};
-
static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct device *dev = &pci->dev;
- const struct m_can_pci_config *cfg;
struct m_can_classdev *mcan_class;
struct m_can_pci_priv *priv;
void __iomem *base;
@@ -150,8 +114,6 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (!mcan_class)
return -ENOMEM;
- cfg = (const struct m_can_pci_config *)id->driver_data;
-
priv = cdev_to_priv(mcan_class);
priv->base = base;
@@ -163,9 +125,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->dev = &pci->dev;
mcan_class->net->irq = pci_irq_vector(pci, 0);
mcan_class->pm_clock_support = 1;
- mcan_class->bit_timing = cfg->bit_timing;
- mcan_class->data_timing = cfg->data_timing;
- mcan_class->can.clock.freq = cfg->clock_freq;
+ mcan_class->can.clock.freq = id->driver_data;
mcan_class->ops = &m_can_pci_ops;
pci_set_drvdata(pci, mcan_class);
@@ -218,8 +178,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
m_can_pci_suspend, m_can_pci_resume);
static const struct pci_device_id m_can_pci_id_table[] = {
- { PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
- { PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
+ { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
+ { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 77501f9c5915..fbb32aa49b24 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1354,46 +1354,25 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
config->legacy_pre_march2020 = false;
}
-int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static struct phylink_pcs *b53_phylink_mac_select_pcs(struct dsa_switch *ds,
+ int port,
+ phy_interface_t interface)
{
struct b53_device *dev = ds->priv;
- int ret = -EOPNOTSUPP;
- if ((phy_interface_mode_is_8023z(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_SGMII) &&
- dev->ops->serdes_link_state)
- ret = dev->ops->serdes_link_state(dev, port, state);
+ if (!dev->ops->phylink_mac_select_pcs)
+ return NULL;
- return ret;
+ return dev->ops->phylink_mac_select_pcs(dev, port, interface);
}
-EXPORT_SYMBOL(b53_phylink_mac_link_state);
void b53_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct b53_device *dev = ds->priv;
-
- if (mode == MLO_AN_PHY || mode == MLO_AN_FIXED)
- return;
-
- if ((phy_interface_mode_is_8023z(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_SGMII) &&
- dev->ops->serdes_config)
- dev->ops->serdes_config(dev, port, mode, state);
}
EXPORT_SYMBOL(b53_phylink_mac_config);
-void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port)
-{
- struct b53_device *dev = ds->priv;
-
- if (dev->ops->serdes_an_restart)
- dev->ops->serdes_an_restart(dev, port);
-}
-EXPORT_SYMBOL(b53_phylink_mac_an_restart);
-
void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
@@ -2269,9 +2248,8 @@ static const struct dsa_switch_ops b53_switch_ops = {
.phy_write = b53_phy_write16,
.adjust_link = b53_adjust_link,
.phylink_get_caps = b53_phylink_get_caps,
- .phylink_mac_link_state = b53_phylink_mac_link_state,
+ .phylink_mac_select_pcs = b53_phylink_mac_select_pcs,
.phylink_mac_config = b53_phylink_mac_config,
- .phylink_mac_an_restart = b53_phylink_mac_an_restart,
.phylink_mac_link_down = b53_phylink_mac_link_down,
.phylink_mac_link_up = b53_phylink_mac_link_up,
.port_enable = b53_enable_port,
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 3085b6cc7d40..795cbffd5c2b 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -21,7 +21,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <net/dsa.h>
@@ -29,7 +29,6 @@
struct b53_device;
struct net_device;
-struct phylink_link_state;
struct b53_io_ops {
int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value);
@@ -48,13 +47,10 @@ struct b53_io_ops {
void (*irq_disable)(struct b53_device *dev, int port);
void (*phylink_get_caps)(struct b53_device *dev, int port,
struct phylink_config *config);
+ struct phylink_pcs *(*phylink_mac_select_pcs)(struct b53_device *dev,
+ int port,
+ phy_interface_t interface);
u8 (*serdes_map_lane)(struct b53_device *dev, int port);
- int (*serdes_link_state)(struct b53_device *dev, int port,
- struct phylink_link_state *state);
- void (*serdes_config)(struct b53_device *dev, int port,
- unsigned int mode,
- const struct phylink_link_state *state);
- void (*serdes_an_restart)(struct b53_device *dev, int port);
void (*serdes_link_set)(struct b53_device *dev, int port,
unsigned int mode, phy_interface_t interface,
bool link_up);
@@ -85,8 +81,15 @@ enum {
BCM7278_DEVICE_ID = 0x7278,
};
+struct b53_pcs {
+ struct phylink_pcs pcs;
+ struct b53_device *dev;
+ u8 lane;
+};
+
#define B53_N_PORTS 9
#define B53_N_PORTS_25 6
+#define B53_N_PCS 2
struct b53_port {
u16 vlan_ctl_mask;
@@ -143,6 +146,8 @@ struct b53_device {
bool vlan_enabled;
unsigned int num_ports;
struct b53_port *ports;
+
+ struct b53_pcs pcs[B53_N_PCS];
};
#define b53_for_each_port(dev, i) \
@@ -336,12 +341,9 @@ int b53_br_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
-int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state);
void b53_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state);
-void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port);
void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface);
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
index 555e5b372321..0690210770ff 100644
--- a/drivers/net/dsa/b53/b53_serdes.c
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -17,6 +17,11 @@
#include "b53_serdes.h"
#include "b53_regs.h"
+static inline struct b53_pcs *pcs_to_b53_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct b53_pcs, pcs);
+}
+
static void b53_serdes_write_blk(struct b53_device *dev, u8 offset, u16 block,
u16 value)
{
@@ -60,51 +65,47 @@ static u16 b53_serdes_read(struct b53_device *dev, u8 lane,
return b53_serdes_read_blk(dev, offset, block);
}
-void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode,
- const struct phylink_link_state *state)
+static int b53_serdes_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 reg;
- if (lane == B53_INVALID_LANE)
- return;
-
reg = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
SERDES_DIGITAL_BLK);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ if (interface == PHY_INTERFACE_MODE_1000BASEX)
reg |= FIBER_MODE_1000X;
else
reg &= ~FIBER_MODE_1000X;
b53_serdes_write(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
SERDES_DIGITAL_BLK, reg);
+
+ return 0;
}
-EXPORT_SYMBOL(b53_serdes_config);
-void b53_serdes_an_restart(struct b53_device *dev, int port)
+static void b53_serdes_an_restart(struct phylink_pcs *pcs)
{
- u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 reg;
- if (lane == B53_INVALID_LANE)
- return;
-
reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
SERDES_MII_BLK);
reg |= BMCR_ANRESTART;
b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
SERDES_MII_BLK, reg);
}
-EXPORT_SYMBOL(b53_serdes_an_restart);
-int b53_serdes_link_state(struct b53_device *dev, int port,
- struct phylink_link_state *state)
+static void b53_serdes_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 dig, bmsr;
- if (lane == B53_INVALID_LANE)
- return 1;
-
dig = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_STATUS,
SERDES_DIGITAL_BLK);
bmsr = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMSR),
@@ -133,10 +134,7 @@ int b53_serdes_link_state(struct b53_device *dev, int port,
state->pause |= MLO_PAUSE_RX;
if (dig & PAUSE_RESOLUTION_TX_SIDE)
state->pause |= MLO_PAUSE_TX;
-
- return 0;
}
-EXPORT_SYMBOL(b53_serdes_link_state);
void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
phy_interface_t interface, bool link_up)
@@ -158,6 +156,12 @@ void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
}
EXPORT_SYMBOL(b53_serdes_link_set);
+static const struct phylink_pcs_ops b53_pcs_ops = {
+ .pcs_get_state = b53_serdes_get_state,
+ .pcs_config = b53_serdes_config,
+ .pcs_an_restart = b53_serdes_an_restart,
+};
+
void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
struct phylink_config *config)
{
@@ -187,9 +191,28 @@ void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
}
EXPORT_SYMBOL(b53_serdes_phylink_get_caps);
+struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev,
+ int port,
+ phy_interface_t interface)
+{
+ u8 lane = b53_serdes_map_lane(dev, port);
+
+ if (lane == B53_INVALID_LANE || lane >= B53_N_PCS ||
+ !dev->pcs[lane].dev)
+ return NULL;
+
+ if (!phy_interface_mode_is_8023z(interface) &&
+ interface != PHY_INTERFACE_MODE_SGMII)
+ return NULL;
+
+ return &dev->pcs[lane].pcs;
+}
+EXPORT_SYMBOL(b53_serdes_phylink_mac_select_pcs);
+
int b53_serdes_init(struct b53_device *dev, int port)
{
u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_pcs *pcs;
u16 id0, msb, lsb;
if (lane == B53_INVALID_LANE)
@@ -212,6 +235,11 @@ int b53_serdes_init(struct b53_device *dev, int port)
(id0 >> SERDES_ID0_REV_NUM_SHIFT) & SERDES_ID0_REV_NUM_MASK,
(u32)msb << 16 | lsb);
+ pcs = &dev->pcs[lane];
+ pcs->dev = dev;
+ pcs->lane = lane;
+ pcs->pcs.ops = &b53_pcs_ops;
+
return 0;
}
EXPORT_SYMBOL(b53_serdes_init);
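
The b53 conversion above embeds a struct phylink_pcs inside b53_pcs; the pcs_ops callbacks receive a pointer to the embedded member and recover the wrapper with container_of() to reach dev and lane. A standalone demonstration of that embedded-struct pattern, with a local container_of() macro (struct names here are illustrative):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct pcs { const char *name; };

struct b53_pcs_model {
        struct pcs pcs;         /* embedded; callbacks get &this */
        int lane;
};

static void get_state(struct pcs *pcs)
{
        struct b53_pcs_model *p = container_of(pcs, struct b53_pcs_model, pcs);

        printf("lane %d\n", p->lane);
}

int main(void)
{
        struct b53_pcs_model m = { .pcs = { "serdes" }, .lane = 1 };

        get_state(&m.pcs);
        return 0;
}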
diff --git a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h
index f47d5caa7557..ef81f5da5f81 100644
--- a/drivers/net/dsa/b53/b53_serdes.h
+++ b/drivers/net/dsa/b53/b53_serdes.h
@@ -107,14 +107,11 @@ static inline u8 b53_serdes_map_lane(struct b53_device *dev, int port)
return dev->ops->serdes_map_lane(dev, port);
}
-int b53_serdes_get_link(struct b53_device *dev, int port);
-int b53_serdes_link_state(struct b53_device *dev, int port,
- struct phylink_link_state *state);
-void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode,
- const struct phylink_link_state *state);
-void b53_serdes_an_restart(struct b53_device *dev, int port);
void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
phy_interface_t interface, bool link_up);
+struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev,
+ int port,
+ phy_interface_t interface);
void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
struct phylink_config *config);
#if IS_ENABLED(CONFIG_B53_SERDES)
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index c51b716657db..da0b889880f6 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -491,10 +491,8 @@ static const struct b53_io_ops b53_srab_ops = {
.irq_disable = b53_srab_irq_disable,
.phylink_get_caps = b53_srab_phylink_get_caps,
#if IS_ENABLED(CONFIG_B53_SERDES)
+ .phylink_mac_select_pcs = b53_serdes_phylink_mac_select_pcs,
.serdes_map_lane = b53_srab_serdes_map_lane,
- .serdes_link_state = b53_serdes_link_state,
- .serdes_config = b53_serdes_config,
- .serdes_an_restart = b53_serdes_an_restart,
.serdes_link_set = b53_serdes_link_set,
#endif
};
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index cf82b1fa9725..87e81c636339 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -809,6 +809,9 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg, offset;
+ if (priv->wol_ports_mask & BIT(port))
+ return;
+
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
if (priv->type == BCM4908_DEVICE_ID ||
priv->type == BCM7445_DEVICE_ID)
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index a416240d001b..12c15da55664 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1681,9 +1681,6 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
break;
case PHY_INTERFACE_MODE_RMII:
miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
-
- /* Configure the RMII clock as output: */
- miicfg |= GSWIP_MII_CFG_RMII_CLK;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 8222c8a6c5ec..7310d19d1f06 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1021,14 +1021,32 @@ static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
bool ingress, struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
+ u8 data;
+ int p;
+
+	/* Limit to one sniffer port.
+	 * Check if any of the ports is already set up for sniffing.
+	 * If so, instruct the user to remove the previous entry and exit.
+	 */
+ for (p = 0; p < dev->port_cnt; p++) {
+ /* Skip the current sniffing port */
+ if (p == mirror->to_local_port)
+ continue;
+
+ ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
+
+ if (data & PORT_MIRROR_SNIFFER) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Sniffer port is already configured, delete existing rules & retry");
+ return -EBUSY;
+ }
+ }
if (ingress)
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
else
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
-
/* configure mirror port */
ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
PORT_MIRROR_SNIFFER, true);
@@ -1042,16 +1060,28 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
struct ksz_device *dev = ds->priv;
+ bool in_use = false;
u8 data;
+ int p;
if (mirror->ingress)
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
else
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
- ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
- if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX)))
+	/* Check if any of the ports is still referring to the sniffer port */
+ for (p = 0; p < dev->port_cnt; p++) {
+ ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
+
+ if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
+ in_use = true;
+ break;
+ }
+ }
+
+	/* Delete sniffing if there are no other mirroring rules */
+ if (!in_use)
ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
PORT_MIRROR_SNIFFER, false);
}
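
The two hunks above enforce a hardware invariant: the KSZ9477 has a single analysis ("sniffer") port, so a second mirror session targeting a different port must be rejected rather than silently retargeting the first, and the sniffer bit may only be cleared once no port still mirrors to it. A standalone sketch of the add-side check follows; the port count and flag values are illustrative, not the real register layout.

/* Standalone sketch of the one-sniffer invariant; compile with cc.
 * PORT_COUNT and the mirror flags are illustrative values only.
 */
#include <stdio.h>
#include <errno.h>

#define PORT_COUNT 7
#define MIRROR_RX      0x1
#define MIRROR_TX      0x2
#define MIRROR_SNIFFER 0x4

static unsigned char mirror_ctrl[PORT_COUNT];

static int mirror_add(int port, int to_port, int ingress)
{
	for (int p = 0; p < PORT_COUNT; p++) {
		if (p == to_port)
			continue;
		if (mirror_ctrl[p] & MIRROR_SNIFFER)
			return -EBUSY;	/* another sniffer already set */
	}
	mirror_ctrl[port] |= ingress ? MIRROR_RX : MIRROR_TX;
	mirror_ctrl[to_port] |= MIRROR_SNIFFER;
	return 0;
}

int main(void)
{
	printf("first session: %d\n", mirror_add(1, 5, 1));	    /* 0 */
	printf("other sniffer: %d\n", mirror_add(2, 6, 1));	    /* -EBUSY */
	printf("same sniffer:  %d\n", mirror_add(2, 5, 0));	    /* 0 */
	return 0;
}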
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 19f0035d4410..fe3cb26f4287 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2229,6 +2229,7 @@ mt7530_setup(struct dsa_switch *ds)
ret = of_get_phy_mode(mac_np, &interface);
if (ret && ret != -ENODEV) {
of_node_put(mac_np);
+ of_node_put(phy_node);
return ret;
}
id = of_mdio_parse_addr(ds->dev, phy_node);
diff --git a/drivers/net/dsa/mv88e6xxx/port_hidden.c b/drivers/net/dsa/mv88e6xxx/port_hidden.c
index b49d05f0e117..7a9f9ff6dedf 100644
--- a/drivers/net/dsa/mv88e6xxx/port_hidden.c
+++ b/drivers/net/dsa/mv88e6xxx/port_hidden.c
@@ -40,8 +40,9 @@ int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY);
- return mv88e6xxx_wait_bit(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
- MV88E6XXX_PORT_RESERVED_1A, bit, 0);
+ return mv88e6xxx_port_wait_bit(chip,
+ MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, bit, 0);
}
int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 413b0006e9a2..faccfb3f0158 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -403,6 +403,7 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
+ struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
enum ocelot_mask_mode mask_mode;
unsigned long port_mask;
@@ -422,9 +423,13 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
/* We are sure that "cpu" was found, otherwise
* dsa_tree_setup_default_cpu() would have failed earlier.
*/
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
/* Make sure all traps are set up for that destination */
- list_for_each_entry(trap, &ocelot->traps, trap_list) {
+ list_for_each_entry(trap, &block_vcap_is2->rules, list) {
+ if (!trap->is_trap)
+ continue;
+
/* Figure out the current trapping destination */
if (using_tag_8021q) {
/* Redirect to the tag_8021q CPU port. If timestamps
@@ -670,6 +675,8 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
enum dsa_tag_protocol old_proto = felix->tag_proto;
+ bool cpu_port_active = false;
+ struct dsa_port *dp;
int err;
if (proto != DSA_TAG_PROTO_SEVILLE &&
@@ -677,6 +684,27 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
proto != DSA_TAG_PROTO_OCELOT_8021Q)
return -EPROTONOSUPPORT;
+ /* We don't support multiple CPU ports, yet the DT blob may have
+ * multiple CPU ports defined. The first CPU port is the active one,
+ * the others are inactive. In this case, DSA will call
+ * ->change_tag_protocol() multiple times, once per CPU port.
+	 * Since we implement the tagging protocol change towards "ocelot" or
+	 * "seville" as effectively initializing the NPI port, honoring every
+	 * call would reassign the NPI port to the last @cpu argument passed,
+	 * which is an unused DSA CPU port rather than the one that should
+	 * actively pass traffic.
+ * Suppress DSA's calls on CPU ports that are inactive.
+ */
+ dsa_switch_for_each_user_port(dp, ds) {
+ if (dp->cpu_dp->index == cpu) {
+ cpu_port_active = true;
+ break;
+ }
+ }
+
+ if (!cpu_port_active)
+ return 0;
+
felix_del_tag_protocol(ds, cpu, old_proto);
err = felix_set_tag_protocol(ds, cpu, proto);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 8d382b27e625..52a8566071ed 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -2316,7 +2316,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
err = dsa_register_switch(ds);
if (err) {
- dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+ dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
goto err_register_ds;
}
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
index 1aa79735355f..060165a85fb7 100644
--- a/drivers/net/dsa/realtek/Kconfig
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -9,34 +9,46 @@ menuconfig NET_DSA_REALTEK
help
Select to enable support for Realtek Ethernet switch chips.
+ Note that at least one interface driver must be enabled for the
+ subdrivers to be loaded. Moreover, an interface driver cannot achieve
+ anything without at least one subdriver enabled.
+
+if NET_DSA_REALTEK
+
config NET_DSA_REALTEK_MDIO
- tristate "Realtek MDIO connected switch driver"
- depends on NET_DSA_REALTEK
+ tristate "Realtek MDIO interface driver"
depends on OF
+ depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+ depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+ depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for registering switches configured
through MDIO.
config NET_DSA_REALTEK_SMI
- tristate "Realtek SMI connected switch driver"
- depends on NET_DSA_REALTEK
+ tristate "Realtek SMI interface driver"
depends on OF
+ depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+ depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+ depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for registering switches connected
through SMI.
config NET_DSA_REALTEK_RTL8365MB
tristate "Realtek RTL8365MB switch subdriver"
- depends on NET_DSA_REALTEK
- depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+ imply NET_DSA_REALTEK_SMI
+ imply NET_DSA_REALTEK_MDIO
select NET_DSA_TAG_RTL8_4
help
Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
config NET_DSA_REALTEK_RTL8366RB
tristate "Realtek RTL8366RB switch subdriver"
- depends on NET_DSA_REALTEK
- depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+ imply NET_DSA_REALTEK_SMI
+ imply NET_DSA_REALTEK_MDIO
select NET_DSA_TAG_RTL4_A
help
- Select to enable support for Realtek RTL8366RB
+ Select to enable support for Realtek RTL8366RB.
+
+endif
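
A note on the cryptic dependency pair above: `depends on FOO || !FOO` is the standard Kconfig idiom for an optional dependency that must still agree on tristate level. The expression is always satisfiable, but it evaluates to m when FOO=m. Worked through with RTL8365MB=m and RTL8366RB=n: `RTL8365MB || !RTL8365MB` gives m || n = m, and `RTL8366RB || !RTL8366RB` gives n || y = y, so NET_DSA_REALTEK_MDIO is capped at m. That prevents a built-in interface driver from calling into a modular subdriver, while leaving either subdriver optional.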
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
index 31e1f100e48e..c58f49d558d2 100644
--- a/drivers/net/dsa/realtek/realtek-mdio.c
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -267,7 +267,6 @@ static const struct of_device_id realtek_mdio_of_match[] = {
#endif
#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
{ .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
- { .compatible = "realtek,rtl8367s", .data = &rtl8365mb_variant, },
#endif
{ /* sentinel */ },
};
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index 2243d3da55b2..45992f79ec8d 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -546,20 +546,11 @@ static const struct of_device_id realtek_smi_of_match[] = {
.data = &rtl8366rb_variant,
},
#endif
- {
- /* FIXME: add support for RTL8366S and more */
- .compatible = "realtek,rtl8366s",
- .data = NULL,
- },
#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
{
.compatible = "realtek,rtl8365mb",
.data = &rtl8365mb_variant,
},
- {
- .compatible = "realtek,rtl8367s",
- .data = &rtl8365mb_variant,
- },
#endif
{ /* sentinel */ },
};
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index bd4cb9d7c35d..827993022386 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,15 +35,6 @@ source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/asix/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
-source "drivers/net/ethernet/broadcom/Kconfig"
-source "drivers/net/ethernet/brocade/Kconfig"
-source "drivers/net/ethernet/cadence/Kconfig"
-source "drivers/net/ethernet/calxeda/Kconfig"
-source "drivers/net/ethernet/cavium/Kconfig"
-source "drivers/net/ethernet/chelsio/Kconfig"
-source "drivers/net/ethernet/cirrus/Kconfig"
-source "drivers/net/ethernet/cisco/Kconfig"
-source "drivers/net/ethernet/cortina/Kconfig"
config CX_ECAT
tristate "Beckhoff CX5020 EtherCAT master support"
@@ -57,6 +48,14 @@ config CX_ECAT
To compile this driver as a module, choose M here. The module
will be called ec_bhf.
+source "drivers/net/ethernet/broadcom/Kconfig"
+source "drivers/net/ethernet/cadence/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
+source "drivers/net/ethernet/chelsio/Kconfig"
+source "drivers/net/ethernet/cirrus/Kconfig"
+source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cortina/Kconfig"
source "drivers/net/ethernet/davicom/Kconfig"
config DNET
@@ -85,7 +84,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/microsoft/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -128,8 +126,9 @@ source "drivers/net/ethernet/mediatek/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
-source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/mscc/Kconfig"
+source "drivers/net/ethernet/microsoft/Kconfig"
+source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
@@ -141,10 +140,10 @@ config FEALNX
Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
cards. <http://www.myson.com.tw/>
+source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/natsemi/Kconfig"
source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
-source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
source "drivers/net/ethernet/nvidia/Kconfig"
source "drivers/net/ethernet/nxp/Kconfig"
@@ -164,6 +163,7 @@ source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/pensando/Kconfig"
source "drivers/net/ethernet/qlogic/Kconfig"
+source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/qualcomm/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/realtek/Kconfig"
@@ -171,10 +171,10 @@ source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rocker/Kconfig"
source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
-source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
source "drivers/net/ethernet/sis/Kconfig"
+source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/socionext/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 33f1a1377588..24d715c28a35 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -486,8 +486,8 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
err = aq_vec_start(aq_vec);
if (err < 0)
goto err_exit;
@@ -517,8 +517,8 @@ int aq_nic_start(struct aq_nic_s *self)
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
} else {
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
aq_vec_isr, aq_vec,
aq_vec_get_affinity_mask(aq_vec));
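
The rewritten loops above (and the matching ones in aq_vec.c below) fix an out-of-bounds read: in `for (i = 0U, x = a[0]; n > i; ++i, x = a[i])`, the increment clause runs after the final `++i`, so `a[n]` is read once before the condition terminates the loop. A standalone demonstration, with illustrative array contents:

/* Standalone demo of the increment-clause out-of-bounds read;
 * compile with cc and run under ASan to see the overread.
 */
#include <stdio.h>

int main(void)
{
	int a[4] = { 10, 20, 30, 40 };
	int n = 4, x, i;

	/* Buggy form: after the last iteration, ++i makes i == n and
	 * x = a[i] still executes before the i < n test fails.
	 */
	for (i = 0, x = a[0]; i < n; ++i, x = a[i])
		printf("a[%d] = %d\n", i, x);

	/* Fixed form: bounds-check first, then load inside the body. */
	for (i = 0; i < n; ++i) {
		x = a[i];
		printf("a[%d] = %d\n", i, x);
	}
	return 0;
}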
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 797a95142d1f..831833911a52 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -444,7 +444,7 @@ err_exit:
static int aq_pm_freeze(struct device *dev)
{
- return aq_suspend_common(dev, false);
+ return aq_suspend_common(dev, true);
}
static int aq_pm_suspend_poweroff(struct device *dev)
@@ -454,7 +454,7 @@ static int aq_pm_suspend_poweroff(struct device *dev)
static int aq_pm_thaw(struct device *dev)
{
- return atl_resume_common(dev, false);
+ return atl_resume_common(dev, true);
}
static int aq_pm_resume_restore(struct device *dev)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 77e76c9efd32..8201ce7adb77 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -346,7 +346,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
- bool is_rsc_completed = true;
int err = 0;
for (; (self->sw_head != self->hw_head) && budget;
@@ -364,12 +363,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
continue;
if (!buff->is_eop) {
+ unsigned int frag_cnt = 0U;
buff_ = buff;
do {
+ bool is_rsc_completed = true;
+
if (buff_->next >= self->size) {
err = -EIO;
goto err_exit;
}
+
+ frag_cnt++;
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
is_rsc_completed =
@@ -377,18 +381,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
next_,
self->hw_head);
- if (unlikely(!is_rsc_completed))
- break;
+ if (unlikely(!is_rsc_completed) ||
+ frag_cnt > MAX_SKB_FRAGS) {
+ err = 0;
+ goto err_exit;
+ }
buff->is_error |= buff_->is_error;
buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
- if (!is_rsc_completed) {
- err = 0;
- goto err_exit;
- }
if (buff->is_error ||
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
@@ -446,7 +449,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
ALIGN(hdr_len, sizeof(long)));
if (buff->len - hdr_len > 0) {
- skb_add_rx_frag(skb, 0, buff->rxdata.page,
+ skb_add_rx_frag(skb, i++, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
AQ_CFG_RX_FRAME_MAX);
@@ -455,7 +458,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
if (!buff->is_eop) {
buff_ = buff;
- i = 1U;
do {
next_ = buff_->next;
buff_ = &self->buff_ring[next_];
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f4774cf051c9..6ab1f3212d24 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -43,8 +43,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
if (!self) {
err = -EINVAL;
} else {
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
ring[AQ_VEC_RX_ID].stats.rx.polls++;
u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
@@ -182,8 +182,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
self->aq_hw_ops = aq_hw_ops;
self->aq_hw = aq_hw;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
if (err < 0)
goto err_exit;
@@ -224,8 +224,8 @@ int aq_vec_start(struct aq_vec_s *self)
unsigned int i = 0U;
int err = 0;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
if (err < 0)
@@ -248,8 +248,8 @@ void aq_vec_stop(struct aq_vec_s *self)
struct aq_ring_s *ring = NULL;
unsigned int i = 0U;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
@@ -268,8 +268,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
if (!self)
goto err_exit;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
}
@@ -297,8 +297,8 @@ void aq_vec_ring_free(struct aq_vec_s *self)
if (!self)
goto err_exit;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
aq_ring_free(&ring[AQ_VEC_TX_ID]);
if (i < self->rx_rings)
aq_ring_free(&ring[AQ_VEC_RX_ID]);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index d875ce3ec759..15ede7285fb5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
err = -ENXIO;
goto err_exit;
}
+
+ /* Validate that the new hw_head_ is reasonable. */
+ if (hw_head_ >= ring->size) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+
ring->hw_head = hw_head_;
err = aq_hw_err_from_flags(self);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 60dde29974bf..df51be3cbe06 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2585,8 +2585,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, 1);
priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
- if (IS_ERR(priv->wol_clk))
- return PTR_ERR(priv->wol_clk);
+ if (IS_ERR(priv->wol_clk)) {
+ ret = PTR_ERR(priv->wol_clk);
+ goto err_deregister_fixed_link;
+ }
/* Set the needed headroom once and for all */
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c19b072f3a23..962253db25b8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14153,10 +14153,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
/* Stop Tx */
bnx2x_tx_disable(bp);
- /* Delete all NAPI objects */
- bnx2x_del_all_napi(bp);
- if (CNIC_LOADED(bp))
- bnx2x_del_all_napi_cnic(bp);
netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
@@ -14261,6 +14257,11 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
bnx2x_drain_tx_queues(bp);
bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
bnx2x_netif_stop(bp, 1);
+ bnx2x_del_all_napi(bp);
+
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
+
bnx2x_free_irq(bp);
/* Report UNLOAD_DONE to MCP */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1c28495875cf..1d69fe0737a1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2707,6 +2707,10 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
struct bnxt_cp_ring_info *cpr2;
+ /* No more budget for RX work */
+ if (budget && work_done >= budget && idx == BNXT_RX_HDL)
+ break;
+
cpr2 = cpr->cp_ring_arr[idx];
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
@@ -3253,6 +3257,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
}
qidx = bp->tc_to_qidx[j];
ring->queue_id = bp->q_info[qidx].queue_id;
+ spin_lock_init(&txr->xdp_tx_lock);
if (i < bp->tx_nr_rings_xdp)
continue;
if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -10338,6 +10343,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (irq_re_init)
udp_tunnel_nic_reset_ntf(bp->dev);
+ if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
+ if (!static_key_enabled(&bnxt_xdp_locking_key))
+ static_branch_enable(&bnxt_xdp_locking_key);
+ } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
+ static_branch_disable(&bnxt_xdp_locking_key);
+ }
set_bit(BNXT_STATE_OPEN, &bp->state);
bnxt_enable_int(bp);
/* Enable TX queues */
@@ -10976,7 +10987,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_CHIP_P5)
return bnxt_rfs_supported(bp);
- if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
+ if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
vnics = 1 + bp->rx_nr_rings;
@@ -13227,10 +13238,9 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
goto init_dflt_ring_err;
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
- bp->flags |= BNXT_FLAG_RFS;
- bp->dev->features |= NETIF_F_NTUPLE;
- }
+
+ bnxt_set_dflt_rfs(bp);
+
init_dflt_ring_err:
bnxt_ulp_irq_restart(bp, rc);
return rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 61aa3e8c5952..98453a78cbd0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -593,7 +593,8 @@ struct nqe_cn {
#define BNXT_MAX_MTU 9500
#define BNXT_MAX_PAGE_MODE_MTU \
((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \
- XDP_PACKET_HEADROOM)
+ XDP_PACKET_HEADROOM - \
+ SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
#define BNXT_MIN_PKT_SIZE 52
@@ -800,6 +801,8 @@ struct bnxt_tx_ring_info {
u32 dev_state;
struct bnxt_ring_struct tx_ring_struct;
+ /* Synchronize simultaneous xdp_xmit on same ring */
+ spinlock_t xdp_tx_lock;
};
#define BNXT_LEGACY_COAL_CMPL_PARAMS \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 9c2ad5e67a5d..00f2f80c0073 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -846,13 +846,6 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
if (rc)
return rc;
- if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
- bnxt_ptp_timecounter_init(bp, false);
- rc = bnxt_ptp_init_rtc(bp, phc_cfg);
- if (rc)
- goto out;
- }
-
if (ptp->ptp_clock && bnxt_pps_config_ok(bp))
return 0;
@@ -861,8 +854,14 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
spin_lock_init(&ptp->ptp_lock);
- if (!(bp->fw_cap & BNXT_FW_CAP_PTP_RTC))
+ if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
+ bnxt_ptp_timecounter_init(bp, false);
+ rc = bnxt_ptp_init_rtc(bp, phc_cfg);
+ if (rc)
+ goto out;
+ } else {
bnxt_ptp_timecounter_init(bp, true);
+ }
ptp->ptp_info = bnxt_ptp_caps;
if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 52fad0fdeacf..03b1d6c04504 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -20,6 +20,8 @@
#include "bnxt.h"
#include "bnxt_xdp.h"
+DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
dma_addr_t mapping, u32 len)
@@ -227,11 +229,16 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
ring = smp_processor_id() % bp->tx_nr_rings_xdp;
txr = &bp->tx_ring[ring];
+ if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
+ return -EINVAL;
+
+ if (static_branch_unlikely(&bnxt_xdp_locking_key))
+ spin_lock(&txr->xdp_tx_lock);
+
for (i = 0; i < num_frames; i++) {
struct xdp_frame *xdp = frames[i];
- if (!txr || !bnxt_tx_avail(bp, txr) ||
- !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP))
+ if (!bnxt_tx_avail(bp, txr))
break;
mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
@@ -250,6 +257,9 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
}
+ if (static_branch_unlikely(&bnxt_xdp_locking_key))
+ spin_unlock(&txr->xdp_tx_lock);
+
return nxmit;
}
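
Why the static key is tied to `tx_nr_rings_xdp < num_possible_cpus()`: bnxt_xdp_xmit() picks its ring with `smp_processor_id() % bp->tx_nr_rings_xdp`, so once there are fewer XDP TX rings than CPUs the mapping is no longer injective and two CPUs can transmit on the same ring concurrently, which is when the per-ring xdp_tx_lock is needed. A standalone sketch of the collision condition, with illustrative CPU and ring counts:

/* Standalone sketch: with fewer XDP TX rings than CPUs, the
 * cpu % nr_rings mapping collides. Counts are illustrative.
 */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	int nr_cpus = 8, nr_rings = 4;
	bool ring_claimed[4] = { false };
	bool need_lock = nr_rings < nr_cpus;

	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		int ring = cpu % nr_rings;

		if (ring_claimed[ring])
			printf("cpu %d collides on ring %d -> lock needed: %d\n",
			       cpu, ring, need_lock);
		ring_claimed[ring] = true;
	}
	return 0;
}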
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 0df40c3beb05..067bb5e821f5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -10,6 +10,8 @@
#ifndef BNXT_XDP_H
#define BNXT_XDP_H
+DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
dma_addr_t mapping, u32 len);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2dd79af9411b..e87e46c47387 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset)
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(value, offset);
else
- writel(value, offset);
+ writel_relaxed(value, offset);
}
static inline u32 bcmgenet_readl(void __iomem *offset)
@@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset)
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(offset);
else
- return readl(offset);
+ return readl_relaxed(offset);
}
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
@@ -2035,6 +2035,11 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
return skb;
}
+static void bcmgenet_hide_tsb(struct sk_buff *skb)
+{
+ __skb_pull(skb, sizeof(struct status_64));
+}
+
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -2141,6 +2146,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
}
GENET_CB(skb)->last_cb = tx_cb_ptr;
+
+ bcmgenet_hide_tsb(skb);
skb_tx_timestamp(skb);
/* Decrement total BD count and advance our write pointer */
@@ -3992,6 +3999,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
priv->wol_irq = platform_get_irq_optional(pdev, 2);
+ if (priv->wol_irq == -EPROBE_DEFER) {
+ err = priv->wol_irq;
+ goto err;
+ }
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
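
On the __skb_pull() added above: bcmgenet_add_tsb() prepends a 64-byte transmit status block to every frame, so without hiding it again the PTP classification done by skb_tx_timestamp() would match against TSB bytes instead of the real packet headers. A minimal sketch of the prepend/hide pairing, assuming a 64-byte status block like GENET's struct status_64; this is not the driver's exact code.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: hardware wants a status block ahead of the frame in DMA,
 * but software consumers of the skb must still see the Ethernet
 * header at skb->data.
 */
static netdev_tx_t xmit_sketch(struct sk_buff *skb, struct net_device *dev)
{
	/* Prepend the status block; the TX descriptors cover it. */
	skb_push(skb, 64);
	/* ... map skb->data / skb->len and ring the doorbell ... */

	/* Hide it again so skb_tx_timestamp() and friends see the
	 * frame starting at the Ethernet header.
	 */
	__skb_pull(skb, 64);
	skb_tx_timestamp(skb);
	return NETDEV_TX_OK;
}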
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 800d5ced5800..61284baa0496 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1219,7 +1219,6 @@ static void gem_rx_refill(struct macb_queue *queue)
/* Make hw descriptor updates visible to CPU */
rmb();
- queue->rx_prepared_head++;
desc = macb_rx_desc(queue, entry);
if (!queue->rx_skbuff[entry]) {
@@ -1258,6 +1257,7 @@ static void gem_rx_refill(struct macb_queue *queue)
dma_wmb();
desc->addr &= ~MACB_BIT(RX_USED);
}
+ queue->rx_prepared_head++;
}
/* Make descriptor updates visible to hardware */
@@ -1658,6 +1658,7 @@ static void macb_tx_restart(struct macb_queue *queue)
unsigned int head = queue->tx_head;
unsigned int tail = queue->tx_tail;
struct macb *bp = queue->bp;
+ unsigned int head_idx, tbqp;
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(TXUBR));
@@ -1665,6 +1666,13 @@ static void macb_tx_restart(struct macb_queue *queue)
if (head == tail)
return;
+ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
+
+ if (tbqp == head_idx)
+ return;
+
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index f2f1ce81fd9c..0ec65ec634df 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -59,7 +59,7 @@ struct nicpf {
/* MSI-X */
u8 num_vec;
- bool irq_allocated[NIC_PF_MSIX_VECTORS];
+ unsigned int irq_allocated[NIC_PF_MSIX_VECTORS];
char irq_name[NIC_PF_MSIX_VECTORS][20];
};
@@ -1150,7 +1150,7 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
u64 intr;
u8 vf;
- if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0))
+ if (irq == nic->irq_allocated[NIC_PF_INTR_ID_MBOX0])
mbx = 0;
else
mbx = 1;
@@ -1176,14 +1176,14 @@ static void nic_free_all_interrupts(struct nicpf *nic)
for (irq = 0; irq < nic->num_vec; irq++) {
if (nic->irq_allocated[irq])
- free_irq(pci_irq_vector(nic->pdev, irq), nic);
- nic->irq_allocated[irq] = false;
+ free_irq(nic->irq_allocated[irq], nic);
+ nic->irq_allocated[irq] = 0;
}
}
static int nic_register_interrupts(struct nicpf *nic)
{
- int i, ret;
+ int i, ret, irq;
nic->num_vec = pci_msix_vec_count(nic->pdev);
/* Enable MSI-X */
@@ -1201,13 +1201,13 @@ static int nic_register_interrupts(struct nicpf *nic)
sprintf(nic->irq_name[i],
"NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));
- ret = request_irq(pci_irq_vector(nic->pdev, i),
- nic_mbx_intr_handler, 0,
+ irq = pci_irq_vector(nic->pdev, i);
+ ret = request_irq(irq, nic_mbx_intr_handler, 0,
nic->irq_name[i], nic);
if (ret)
goto fail;
- nic->irq_allocated[i] = true;
+ nic->irq_allocated[i] = irq;
}
/* Enable mailbox interrupt */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e7b4e3ed056c..8d719f82854a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2793,14 +2793,14 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
goto out;
na = ret;
- memcpy(p->id, vpd + id, min_t(int, id_len, ID_LEN));
+ memcpy(p->id, vpd + id, min_t(unsigned int, id_len, ID_LEN));
strim(p->id);
- memcpy(p->sn, vpd + sn, min_t(int, sn_len, SERNUM_LEN));
+ memcpy(p->sn, vpd + sn, min_t(unsigned int, sn_len, SERNUM_LEN));
strim(p->sn);
- memcpy(p->pn, vpd + pn, min_t(int, pn_len, PN_LEN));
+ memcpy(p->pn, vpd + pn, min_t(unsigned int, pn_len, PN_LEN));
strim(p->pn);
- memcpy(p->na, vpd + na, min_t(int, na_len, MACADDR_LEN));
- strim((char *)p->na);
+ memcpy(p->na, vpd + na, min_t(unsigned int, na_len, MACADDR_LEN));
+ strim(p->na);
out:
vfree(vpd);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 79df5a72877b..0040dcaab945 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1399,8 +1399,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
- if (!dev)
+ if (!dev) {
+ pci_disable_device(pdev);
return -ENOMEM;
+ }
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
@@ -1785,6 +1787,7 @@ err_out_free_res:
err_out_free_netdev:
free_netdev (dev);
+ pci_disable_device(pdev);
return -ENODEV;
}
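
Context for the tulip fix: tulip_init_one() enables the PCI device before allocating the netdev, so every failure past that point must disable it again; the patch adds the missing pci_disable_device() to both the alloc_etherdev() failure and the common error path. The general shape of the unwind, as a sketch of the idiom rather than the tulip code:

#include <linux/pci.h>
#include <linux/etherdevice.h>

/* Sketch of the enable/disable pairing on probe error paths. */
static int probe_sketch(struct pci_dev *pdev)
{
	struct net_device *dev;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	dev = alloc_etherdev(0);	/* private size elided */
	if (!dev) {
		err = -ENOMEM;
		goto err_disable;	/* undo pci_enable_device() */
	}

	/* ... further setup; later failures unwind through here too ... */
	return 0;

err_disable:
	pci_disable_device(pdev);
	return err;
}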
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index d5356db7539a..5231818943c6 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1835,11 +1835,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
- /* Disable ast2600 problematic HW arbitration */
- if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
- iowrite32(FTGMAC100_TM_DEFAULT,
- priv->base + FTGMAC100_OFFSET_TM);
- }
} else {
priv->rxdes0_edorr_mask = BIT(15);
priv->txdes0_edotr_mask = BIT(15);
@@ -1911,6 +1906,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = ftgmac100_setup_clk(priv);
if (err)
goto err_phy_connect;
+
+ /* Disable ast2600 problematic HW arbitration */
+ if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ iowrite32(FTGMAC100_TM_DEFAULT,
+ priv->base + FTGMAC100_OFFSET_TM);
}
/* Default ring sizes */
@@ -1928,6 +1928,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
/* AST2400 doesn't have working HW checksum generation */
if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
+ /* AST2600 tx checksum with NCSI is broken */
+ if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
if (np && of_get_property(np, "no-hw-checksum", NULL))
netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 763d2c7b5fb1..5750f9a56393 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
info->phc_index = -1;
fman_node = of_get_parent(mac_node);
- if (fman_node)
+ if (fman_node) {
ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+ of_node_put(fman_node);
+ }
- if (ptp_node)
+ if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
+ of_node_put(ptp_node);
+ }
if (ptp_dev)
ptp = platform_get_drvdata(ptp_dev);
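
The dpaa fix above is a reference-counting repair: of_get_parent() and of_parse_phandle() both return device-tree nodes with an elevated refcount, and each must be dropped as soon as the node has served its purpose (the device lookup takes its own reference on the struct device). A sketch of the balanced pattern; the node and property names follow the dpaa code above.

#include <linux/of.h>
#include <linux/of_platform.h>

/* Sketch of balanced OF refcounting around phandle lookups. */
static struct platform_device *find_ptp_dev_sketch(struct device_node *mac_node)
{
	struct device_node *fman_node, *ptp_node = NULL;
	struct platform_device *ptp_dev = NULL;

	fman_node = of_get_parent(mac_node);	/* +1 ref on fman_node */
	if (fman_node) {
		ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
		of_node_put(fman_node);		/* done with the parent */
	}
	if (ptp_node) {
		ptp_dev = of_find_device_by_node(ptp_node);
		of_node_put(ptp_node);		/* lookup holds its own ref */
	}
	return ptp_dev;
}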
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 5f5f8c53c4a0..c8cb541572ff 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -167,7 +167,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
base = of_iomap(node, 0);
if (!base) {
err = -ENOMEM;
- goto err_close;
+ goto err_put;
}
err = fsl_mc_allocate_irqs(mc_dev);
@@ -210,6 +210,8 @@ err_free_mc_irq:
fsl_mc_free_irqs(mc_dev);
err_unmap:
iounmap(base);
+err_put:
+ of_node_put(node);
err_close:
dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
err_free_mcp:
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 79afb1d7289b..9182631856d5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -297,10 +297,6 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
if (tc < 0 || tc >= priv->num_tx_rings)
return -EINVAL;
- /* Do not support TXSTART and TX CSUM offload simutaniously */
- if (ndev->features & NETIF_F_CSUM_MASK)
- return -EBUSY;
-
/* TSD and Qbv are mutually exclusive in hardware */
if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
return -EBUSY;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 11227f51404c..9f33ec838b52 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3731,7 +3731,7 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
ARRAY_SIZE(out_val));
if (ret) {
dev_dbg(&fep->pdev->dev, "no stop mode property\n");
- return ret;
+ goto out;
}
fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.c b/drivers/net/ethernet/fungible/funcore/fun_dev.c
index 5d7aef73df61..fb5120d90f26 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_dev.c
+++ b/drivers/net/ethernet/fungible/funcore/fun_dev.c
@@ -586,8 +586,8 @@ static int fun_get_dev_limits(struct fun_dev *fdev)
/* Calculate the max QID based on SQ/CQ/doorbell counts.
* SQ/CQ doorbells alternate.
*/
- num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) /
- (fdev->db_stride * 4);
+ num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) >>
+ (2 + NVME_CAP_STRIDE(fdev->cap_reg));
fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1;
fdev->kern_end_qid = fdev->max_qid + 1;
return 0;
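
The fun_dev change corrects the stride arithmetic: CAP.DSTRD is an exponent, with consecutive doorbells spaced (4 << DSTRD) bytes apart from BAR offset 0x1000, so the count is `(bar_len - NVME_REG_DBS) >> (2 + DSTRD)` rather than a division by `db_stride * 4`. A standalone worked example; the BAR length and stride value are illustrative.

/* Standalone check of the doorbell-count arithmetic. NVMe places the
 * doorbell array at BAR offset 0x1000, doorbells are (4 << CAP.DSTRD)
 * bytes apart, and SQ/CQ doorbells alternate. Values are illustrative.
 */
#include <stdio.h>

#define NVME_REG_DBS 0x1000

int main(void)
{
	unsigned long bar_len = 128 * 1024;	/* 128 KiB BAR */
	unsigned int dstrd = 0;			/* CAP.DSTRD */
	unsigned long num_dbs = (bar_len - NVME_REG_DBS) >> (2 + dstrd);

	printf("doorbells: %lu, SQ/CQ pairs: %lu\n", num_dbs, num_dbs / 2);
	/* prints 31744 doorbells, 15872 pairs for dstrd = 0 */
	return 0;
}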
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 7edf8569514c..928d934cb21a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -1065,19 +1065,23 @@ int hns_mac_init(struct dsaf_device *dsaf_dev)
device_for_each_child_node(dsaf_dev->dev, child) {
ret = fwnode_property_read_u32(child, "reg", &port_id);
if (ret) {
+ fwnode_handle_put(child);
dev_err(dsaf_dev->dev,
"get reg fail, ret=%d!\n", ret);
return ret;
}
if (port_id >= max_port_num) {
+ fwnode_handle_put(child);
dev_err(dsaf_dev->dev,
"reg(%u) out of range!\n", port_id);
return -EINVAL;
}
mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
GFP_KERNEL);
- if (!mac_cb)
+ if (!mac_cb) {
+ fwnode_handle_put(child);
return -ENOMEM;
+ }
mac_cb->fw_port = child;
mac_cb->mac_id = (u8)port_id;
dsaf_dev->mac_cb[port_id] = mac_cb;
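
The hns fix follows from how device_for_each_child_node() manages references: the iterator takes a reference on each child it hands to the loop body and drops it on the next iteration, so any return out of the loop leaks that reference unless the body drops it first. A sketch of the rule; the helper names are the real fwnode API, the body is illustrative.

#include <linux/device.h>
#include <linux/property.h>

/* Sketch: early exits from device_for_each_child_node() must drop
 * the child reference themselves. Body is illustrative.
 */
static int scan_children_sketch(struct device *dev)
{
	struct fwnode_handle *child;
	u32 reg;
	int ret;

	device_for_each_child_node(dev, child) {
		ret = fwnode_property_read_u32(child, "reg", &reg);
		if (ret) {
			fwnode_handle_put(child);	/* loop won't do it */
			return ret;
		}
	}
	return 0;	/* normal termination holds no reference */
}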
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
index 0c60f41fca8a..f3c9395d8351 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -75,7 +75,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
- "failed to get tqp stat, ret = %d, tx = %u.\n",
+ "failed to get tqp stat, ret = %d, rx = %u.\n",
ret, i);
return ret;
}
@@ -89,7 +89,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
- "failed to get tqp stat, ret = %d, rx = %u.\n",
+ "failed to get tqp stat, ret = %d, tx = %u.\n",
ret, i);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 44d9b560b337..93aeb615191d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -562,12 +562,12 @@ static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
for (i = 0; i < ring_num; i++) {
j = 0;
- sprintf(result[j++], "%8u", i);
- sprintf(result[j++], "%9u", ring->tx_copybreak);
- sprintf(result[j++], "%3u", tx_spare->len);
- sprintf(result[j++], "%3u", tx_spare->next_to_use);
- sprintf(result[j++], "%3u", tx_spare->next_to_clean);
- sprintf(result[j++], "%3u", tx_spare->last_to_clean);
+ sprintf(result[j++], "%u", i);
+ sprintf(result[j++], "%u", ring->tx_copybreak);
+ sprintf(result[j++], "%u", tx_spare->len);
+ sprintf(result[j++], "%u", tx_spare->next_to_use);
+ sprintf(result[j++], "%u", tx_spare->next_to_clean);
+ sprintf(result[j++], "%u", tx_spare->last_to_clean);
sprintf(result[j++], "%pad", &tx_spare->dma);
hns3_dbg_fill_content(content, sizeof(content),
tx_spare_info_items,
@@ -598,35 +598,35 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
u32 base_add_l, base_add_h;
u32 j = 0;
- sprintf(result[j++], "%8u", index);
+ sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_NUM_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_LEN_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_TAIL_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_HEAD_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_FBDNUM_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%9u", ring->rx_copybreak);
+ sprintf(result[j++], "%u", ring->rx_copybreak);
- sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_EN_REG) ? "on" : "off");
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_EN_REG) ? "on" : "off");
else
- sprintf(result[j++], "%10s", "NA");
+ sprintf(result[j++], "%s", "NA");
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_H_REG);
@@ -700,36 +700,36 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
u32 base_add_l, base_add_h;
u32 j = 0;
- sprintf(result[j++], "%8u", index);
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", index);
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BD_NUM_REG));
- sprintf(result[j++], "%2u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TC_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_FBDNUM_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_OFFSET_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_EN_REG) ? "on" : "off");
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_EN_REG) ? "on" : "off");
else
- sprintf(result[j++], "%10s", "NA");
+ sprintf(result[j++], "%s", "NA");
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_H_REG);
@@ -848,15 +848,15 @@ static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
{
unsigned int j = 0;
- sprintf(result[j++], "%5d", idx);
+ sprintf(result[j++], "%d", idx);
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info));
- sprintf(result[j++], "%7u", le16_to_cpu(desc->rx.pkt_len));
- sprintf(result[j++], "%4u", le16_to_cpu(desc->rx.size));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size));
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->rx.fd_id));
- sprintf(result[j++], "%8u", le16_to_cpu(desc->rx.vlan_tag));
- sprintf(result[j++], "%15u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
- sprintf(result[j++], "%11u", le16_to_cpu(desc->rx.ot_vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag));
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info));
if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
u32 ol_info = le32_to_cpu(desc->rx.ol_info);
@@ -930,19 +930,19 @@ static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv,
{
unsigned int j = 0;
- sprintf(result[j++], "%6d", idx);
+ sprintf(result[j++], "%d", idx);
sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.vlan_tag));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.send_size));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size));
sprintf(result[j++], "%#x",
le32_to_cpu(desc->tx.type_cs_vlan_tso_len));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.outer_vlan_tag));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.tv));
- sprintf(result[j++], "%10u",
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv));
+ sprintf(result[j++], "%u",
le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.mss_hw_csum));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum));
}
static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 14dc12c2155d..a3ee7875d6a7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -5203,6 +5203,13 @@ static void hns3_state_init(struct hnae3_handle *handle)
set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
}
+static void hns3_state_uninit(struct hnae3_handle *handle)
+{
+ struct hns3_nic_priv *priv = handle->priv;
+
+ clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
+}
+
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
@@ -5320,7 +5327,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
return ret;
out_reg_netdev_fail:
+ hns3_state_uninit(handle);
hns3_dbg_uninit(handle);
+ hns3_client_stop(handle);
out_client_start:
hns3_free_rx_cpu_rmap(netdev);
hns3_nic_uninit_irq(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 6799d16de34b..7998ca617a92 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -94,6 +94,13 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
enum hclge_comm_cmd_status status;
struct hclge_desc desc;
+ if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "msg data length(=%u) exceeds maximum(=%u)\n",
+ msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+ return -EMSGSIZE;
+ }
+
resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
@@ -176,7 +183,7 @@ static int hclge_get_ring_chain_from_mbx(
ring_num = req->msg.ring_num;
if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
- return -ENOMEM;
+ return -EINVAL;
for (i = 0; i < ring_num; i++) {
if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
@@ -587,9 +594,9 @@ static int hclge_set_vf_mtu(struct hclge_vport *vport,
return hclge_set_vport_mtu(vport, mtu);
}
-static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- struct hclge_respond_to_vf_msg *resp_msg)
+static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
@@ -599,17 +606,18 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
if (queue_id >= handle->kinfo.num_tqps) {
dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
queue_id, mbx_req->mbx_src_vfid);
- return;
+ return -EINVAL;
}
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
resp_msg->len = sizeof(qid_in_pf);
+ return 0;
}
-static void hclge_get_rss_key(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- struct hclge_respond_to_vf_msg *resp_msg)
+static int hclge_get_rss_key(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back;
@@ -627,13 +635,14 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
- return;
+ return -EINVAL;
}
memcpy(resp_msg->data,
&rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
+ return 0;
}
static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
@@ -809,10 +818,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"VF fail(%d) to set mtu\n", ret);
break;
case HCLGE_MBX_GET_QID_IN_PF:
- hclge_get_queue_id_in_pf(vport, req, &resp_msg);
+ ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg);
break;
case HCLGE_MBX_GET_RSS_KEY:
- hclge_get_rss_key(vport, req, &resp_msg);
+ ret = hclge_get_rss_key(vport, req, &resp_msg);
break;
case HCLGE_MBX_GET_LINK_MODE:
hclge_get_link_mode(vport, req);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 2d9b06d7caad..f7dc7d825f63 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -771,7 +771,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
	/* If we only have one page, still need to use the shadow wqe when
	 * the wqe rolls over the page boundary
	 */
- if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
+ if (curr_pg != end_pg || end_prod_idx < *prod_idx) {
void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
@@ -841,7 +841,10 @@ struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
*cons_idx = curr_cons_idx;
- if (curr_pg != end_pg) {
+	/* If we only have one page, still need to use the shadow wqe when
+	 * the wqe rolls over the page boundary
+	 */
+ if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) {
void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
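
With this change, hinic_get_wqe() and hinic_read_wqe() use the same wrap test: after masking, an end index numerically below the start index means the WQE wrapped past the end of the single-page ring and must be assembled in the contiguous shadow buffer. A standalone sketch of the detection, with illustrative sizes:

/* Standalone sketch of the masked-index wrap test. Sizes are
 * illustrative; a wqe spanning the ring end needs a shadow copy.
 */
#include <stdio.h>

#define Q_DEPTH 256	/* power of two */
#define MASK(idx) ((idx) & (Q_DEPTH - 1))

int main(void)
{
	unsigned int start = 254, num_wqebbs = 4;
	unsigned int end = MASK(start + num_wqebbs - 1);	/* == 1 */

	if (end < MASK(start))
		printf("wqe wraps (%u..%u): copy via shadow buffer\n",
		       MASK(start), end);
	return 0;
}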
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 77683909ca3d..5c5931dba51d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -3210,13 +3210,8 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
- ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
- ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
- } else {
- ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
- ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
- }
+ ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+ ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -3231,23 +3226,21 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int ret;
- ret = 0;
+ if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
+ ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
+ netdev_err(netdev, "Invalid request.\n");
+ netdev_err(netdev, "Max tx buffers = %llu\n",
+ adapter->max_rx_add_entries_per_subcrq);
+ netdev_err(netdev, "Max rx buffers = %llu\n",
+ adapter->max_tx_entries_per_subcrq);
+ return -EINVAL;
+ }
+
adapter->desired.rx_entries = ring->rx_pending;
adapter->desired.tx_entries = ring->tx_pending;
- ret = wait_for_reset(adapter);
-
- if (!ret &&
- (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
- adapter->req_tx_entries_per_subcrq != ring->tx_pending))
- netdev_info(netdev,
- "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
- ring->rx_pending, ring->tx_pending,
- adapter->req_rx_add_entries_per_subcrq,
- adapter->req_tx_entries_per_subcrq);
- return ret;
+ return wait_for_reset(adapter);
}
static void ibmvnic_get_channels(struct net_device *netdev,
@@ -3255,14 +3248,8 @@ static void ibmvnic_get_channels(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
- channels->max_rx = adapter->max_rx_queues;
- channels->max_tx = adapter->max_tx_queues;
- } else {
- channels->max_rx = IBMVNIC_MAX_QUEUES;
- channels->max_tx = IBMVNIC_MAX_QUEUES;
- }
-
+ channels->max_rx = adapter->max_rx_queues;
+ channels->max_tx = adapter->max_tx_queues;
channels->max_other = 0;
channels->max_combined = 0;
channels->rx_count = adapter->req_rx_queues;
@@ -3275,22 +3262,11 @@ static int ibmvnic_set_channels(struct net_device *netdev,
struct ethtool_channels *channels)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int ret;
- ret = 0;
adapter->desired.rx_queues = channels->rx_count;
adapter->desired.tx_queues = channels->tx_count;
- ret = wait_for_reset(adapter);
-
- if (!ret &&
- (adapter->req_rx_queues != channels->rx_count ||
- adapter->req_tx_queues != channels->tx_count))
- netdev_info(netdev,
- "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
- channels->rx_count, channels->tx_count,
- adapter->req_rx_queues, adapter->req_tx_queues);
- return ret;
+ return wait_for_reset(adapter);
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -3298,43 +3274,32 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
struct ibmvnic_adapter *adapter = netdev_priv(dev);
int i;
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
- i++, data += ETH_GSTRING_LEN)
- memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+ if (stringset != ETH_SS_STATS)
+ return;
- for (i = 0; i < adapter->req_tx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
- data += ETH_GSTRING_LEN;
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
+ memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
- snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
- data += ETH_GSTRING_LEN;
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+ data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "tx%d_dropped_packets", i);
- data += ETH_GSTRING_LEN;
- }
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
- for (i = 0; i < adapter->req_rx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
+ data += ETH_GSTRING_LEN;
+ }
- snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
- data += ETH_GSTRING_LEN;
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+ data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
- data += ETH_GSTRING_LEN;
- }
- break;
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
- case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- ibmvnic_priv_flags[i]);
- break;
- default:
- return;
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+ data += ETH_GSTRING_LEN;
}
}
@@ -3347,8 +3312,6 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
return ARRAY_SIZE(ibmvnic_stats) +
adapter->req_tx_queues * NUM_TX_STATS +
adapter->req_rx_queues * NUM_RX_STATS;
- case ETH_SS_PRIV_FLAGS:
- return ARRAY_SIZE(ibmvnic_priv_flags);
default:
return -EOPNOTSUPP;
}
@@ -3401,26 +3364,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
}
}
-static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-
- return adapter->priv_flags;
-}
-
-static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
-
- if (which_maxes)
- adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
- else
- adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
-
- return 0;
-}
-
static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_drvinfo = ibmvnic_get_drvinfo,
.get_msglevel = ibmvnic_get_msglevel,
@@ -3434,8 +3377,6 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_sset_count = ibmvnic_get_sset_count,
.get_ethtool_stats = ibmvnic_get_ethtool_stats,
.get_link_ksettings = ibmvnic_get_link_ksettings,
- .get_priv_flags = ibmvnic_get_priv_flags,
- .set_priv_flags = ibmvnic_set_priv_flags,
};
/* Routines for managing CRQs/sCRQs */
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 8f5cefb932dd..1310c861bf83 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -41,11 +41,6 @@
#define IBMVNIC_RESET_DELAY 100
-static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
-#define IBMVNIC_USE_SERVER_MAXES 0x1
- "use-server-maxes"
-};
-
struct ibmvnic_login_buffer {
__be32 len;
__be32 version;
@@ -883,7 +878,6 @@ struct ibmvnic_adapter {
struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
dma_addr_t ip_offload_ctrl_tok;
u32 msg_enable;
- u32 priv_flags;
/* Vital Product Data (VPD) */
struct ibmvnic_vpd *vpd;
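
The ibmvnic ethtool hunks above converge on the usual validate-then-reset shape: reject out-of-range requests up front and let the reset path apply whatever passes. A minimal standalone sketch of that pattern (illustrative names, not the driver's own helpers):

#include <errno.h>

/* Sketch: bounds-check requested ring sizes before committing them.
 * max_rx/max_tx stand in for the adapter limits reported above.
 */
static int example_set_ringparam(unsigned int rx_req, unsigned int tx_req,
				 unsigned int max_rx, unsigned int max_tx)
{
	if (rx_req > max_rx || tx_req > max_tx)
		return -EINVAL;	/* fail fast, leave no partial state */

	/* store the desired values and let the device reset apply them */
	return 0;
}
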
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index d60e2016d03c..e6c8e6d5234f 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1009,8 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
- u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
- u16 lat_enc_d = 0; /* latency decoded */
+ u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
+ u32 lat_enc_d = 0; /* latency decoded */
u16 lat_enc = 0; /* latency encoded */
if (link) {
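
The u16-to-u32 change matters because a decoded LTR latency does not fit in 16 bits. A hedged, runnable sketch of the hazard (field layout per the PCIe LTR encoding; the helper below is illustrative, not the driver's code):

#include <stdint.h>
#include <stdio.h>

/* Sketch: LTR encodes a 10-bit value (bits 9:0) and a 3-bit scale
 * (bits 12:10); the decoded latency is value << (5 * scale). Even a
 * modest scale pushes the result past 16 bits, so storing the decode
 * in a u16 silently truncates.
 */
static uint64_t ltr_decode(uint16_t enc)
{
	uint64_t value = enc & 0x3ff;
	unsigned int scale = (enc >> 10) & 0x7;

	return value << (5 * scale);
}

int main(void)
{
	uint16_t enc = (2u << 10) | 1000;	/* scale 2, value 1000 */

	printf("%llu\n", (unsigned long long)ltr_decode(enc));
	/* prints 1024000: already larger than a u16 can hold */
	return 0;
}
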
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6778df2177a1..98871f014994 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7549,42 +7549,43 @@ static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
struct i40e_fwd_adapter *fwd)
{
+ struct i40e_channel *ch = NULL, *ch_tmp, *iter;
int ret = 0, num_tc = 1, i, aq_err;
- struct i40e_channel *ch, *ch_tmp;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- if (list_empty(&vsi->macvlan_list))
- return -EINVAL;
-
/* Go through the list and find an available channel */
- list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
- if (!i40e_is_channel_macvlan(ch)) {
- ch->fwd = fwd;
+ list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
+ if (!i40e_is_channel_macvlan(iter)) {
+ iter->fwd = fwd;
/* record configuration for macvlan interface in vdev */
for (i = 0; i < num_tc; i++)
netdev_bind_sb_channel_queue(vsi->netdev, vdev,
i,
- ch->num_queue_pairs,
- ch->base_queue);
- for (i = 0; i < ch->num_queue_pairs; i++) {
+ iter->num_queue_pairs,
+ iter->base_queue);
+ for (i = 0; i < iter->num_queue_pairs; i++) {
struct i40e_ring *tx_ring, *rx_ring;
u16 pf_q;
- pf_q = ch->base_queue + i;
+ pf_q = iter->base_queue + i;
/* Get to TX ring ptr */
tx_ring = vsi->tx_rings[pf_q];
- tx_ring->ch = ch;
+ tx_ring->ch = iter;
/* Get the RX ring ptr */
rx_ring = vsi->rx_rings[pf_q];
- rx_ring->ch = ch;
+ rx_ring->ch = iter;
}
+ ch = iter;
break;
}
}
+ if (!ch)
+ return -EINVAL;
+
/* Guarantee all rings are updated before we update the
* MAC address filter.
*/
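
The i40e hunk is an instance of a common list-iterator fix: the cursor of list_for_each_entry_safe() is not a valid entry once the loop runs to completion (or never runs), so the match must be recorded in a separate pointer. A kernel-style sketch, assuming <linux/list.h> and an illustrative item type:

#include <linux/list.h>
#include <linux/types.h>

struct item {
	struct list_head list;
	bool in_use;
};

/* Sketch: record the match separately; never dereference the loop
 * cursor after the loop ends without a break.
 */
static struct item *find_available(struct list_head *head)
{
	struct item *found = NULL, *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (!iter->in_use) {
			found = iter;
			break;
		}
	}
	return found;	/* NULL: empty list or no free entry */
}
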
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 190590d32faf..7dfcf78b57fb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2871,7 +2871,6 @@ continue_reset:
running = adapter->state == __IAVF_RUNNING;
if (running) {
- netdev->flags &= ~IFF_UP;
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
@@ -2988,7 +2987,7 @@ continue_reset:
* to __IAVF_RUNNING
*/
iavf_up_complete(adapter);
- netdev->flags |= IFF_UP;
+
iavf_irq_enable(adapter, true);
} else {
iavf_change_state(adapter, __IAVF_DOWN);
@@ -3004,10 +3003,8 @@ continue_reset:
reset_err:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- if (running) {
+ if (running)
iavf_change_state(adapter, __IAVF_RUNNING);
- netdev->flags |= IFF_UP;
- }
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
iavf_close(netdev);
}
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index d4f1874df7d0..a895e3a8e988 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -301,7 +301,6 @@ enum ice_vsi_state {
ICE_VSI_NETDEV_REGISTERED,
ICE_VSI_UMAC_FLTR_CHANGED,
ICE_VSI_MMAC_FLTR_CHANGED,
- ICE_VSI_VLAN_FLTR_CHANGED,
ICE_VSI_PROMISC_CHANGED,
ICE_VSI_STATE_NBITS /* must be last */
};
@@ -541,6 +540,7 @@ struct ice_pf {
struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
+ struct mutex adev_mutex; /* lock to protect aux device access */
u32 msg_enable;
struct ice_ptp ptp;
struct tty_driver *ice_gnss_tty_driver;
@@ -672,7 +672,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
{
- return !!vsi->xdp_prog;
+ return !!READ_ONCE(vsi->xdp_prog);
}
static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 5daade32ea62..fba178e07600 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -577,7 +577,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
- if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
+ if (!vsi || vsi->type != ICE_VSI_PF)
return;
netdev = vsi->netdev;
@@ -599,7 +599,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
int base_idx, i;
if (!vsi || vsi->type != ICE_VSI_PF)
- return -EINVAL;
+ return 0;
pf = vsi->back;
netdev = vsi->netdev;
@@ -636,7 +636,6 @@ void ice_remove_arfs(struct ice_pf *pf)
if (!pf_vsi)
return;
- ice_free_cpu_rx_rmap(pf_vsi);
ice_clear_arfs(pf_vsi);
}
@@ -653,9 +652,5 @@ void ice_rebuild_arfs(struct ice_pf *pf)
return;
ice_remove_arfs(pf);
- if (ice_set_cpu_rx_rmap(pf_vsi)) {
- dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
- return;
- }
ice_init_arfs(pf_vsi);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 9a84d746a6c4..6a463b242c7d 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -361,7 +361,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
np = netdev_priv(netdev);
vsi = np->vsi;
- if (ice_is_reset_in_progress(vsi->back->state))
+ if (ice_is_reset_in_progress(vsi->back->state) ||
+ test_bit(ICE_VF_DIS, vsi->back->state))
return NETDEV_TX_BUSY;
repr = ice_netdev_to_repr(netdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index bd58d9d2e565..6a413331572b 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -52,7 +52,7 @@ static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
static inline int ice_eswitch_configure(struct ice_pf *pf)
{
- return -EOPNOTSUPP;
+ return 0;
}
static inline int ice_eswitch_rebuild(struct ice_pf *pf)
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index af57eb114966..85a94483c2ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -58,7 +58,16 @@ int
ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
u8 promisc_mask)
{
- return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+ struct ice_pf *pf = hw->back;
+ int result;
+
+ result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+ if (result)
+ dev_err(ice_pf_to_dev(pf),
+ "Error setting promisc mode on VSI %i (rc=%d)\n",
+ vsi->vsi_num, result);
+
+ return result;
}
/**
@@ -73,7 +82,16 @@ int
ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
u8 promisc_mask)
{
- return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+ struct ice_pf *pf = hw->back;
+ int result;
+
+ result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+ if (result)
+ dev_err(ice_pf_to_dev(pf),
+ "Error clearing promisc mode on VSI %i (rc=%d)\n",
+ vsi->vsi_num, result);
+
+ return result;
}
/**
@@ -87,7 +105,16 @@ int
ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid)
{
- return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+ struct ice_pf *pf = hw->back;
+ int result;
+
+ result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+ if (result)
+ dev_err(ice_pf_to_dev(pf),
+ "Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
+ ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
+
+ return result;
}
/**
@@ -101,7 +128,16 @@ int
ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid)
{
- return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+ struct ice_pf *pf = hw->back;
+ int result;
+
+ result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+ if (result)
+ dev_err(ice_pf_to_dev(pf),
+ "Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
+ ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
+
+ return result;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 25a436d342c2..3e3b2ed4cd5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -37,14 +37,17 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
if (WARN_ON_ONCE(!in_task()))
return;
+ mutex_lock(&pf->adev_mutex);
if (!pf->adev)
- return;
+ goto finish;
device_lock(&pf->adev->dev);
iadrv = ice_get_auxiliary_drv(pf);
if (iadrv && iadrv->event_handler)
iadrv->event_handler(pf, event);
device_unlock(&pf->adev->dev);
+finish:
+ mutex_unlock(&pf->adev_mutex);
}
/**
@@ -290,7 +293,6 @@ int ice_plug_aux_dev(struct ice_pf *pf)
return -ENOMEM;
adev = &iadev->adev;
- pf->adev = adev;
iadev->pf = pf;
adev->id = pf->aux_idx;
@@ -300,18 +302,20 @@ int ice_plug_aux_dev(struct ice_pf *pf)
ret = auxiliary_device_init(adev);
if (ret) {
- pf->adev = NULL;
kfree(iadev);
return ret;
}
ret = auxiliary_device_add(adev);
if (ret) {
- pf->adev = NULL;
auxiliary_device_uninit(adev);
return ret;
}
+ mutex_lock(&pf->adev_mutex);
+ pf->adev = adev;
+ mutex_unlock(&pf->adev_mutex);
+
return 0;
}
@@ -320,12 +324,17 @@ int ice_plug_aux_dev(struct ice_pf *pf)
*/
void ice_unplug_aux_dev(struct ice_pf *pf)
{
- if (!pf->adev)
- return;
+ struct auxiliary_device *adev;
- auxiliary_device_delete(pf->adev);
- auxiliary_device_uninit(pf->adev);
+ mutex_lock(&pf->adev_mutex);
+ adev = pf->adev;
pf->adev = NULL;
+ mutex_unlock(&pf->adev_mutex);
+
+ if (adev) {
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+ }
}
/**
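
The adev_mutex changes establish a publish-last/detach-first ordering: the pointer only becomes visible after the device is fully initialized and added, and it is cleared under the same lock before teardown, so a concurrent reader such as ice_send_event_to_aux() never sees a half-built or freed device. In sketch form (names mirror the hunk, logic is the generic idiom):

/* plug: publish only after init + add succeeded */
mutex_lock(&pf->adev_mutex);
pf->adev = adev;
mutex_unlock(&pf->adev_mutex);

/* unplug: detach under the lock, tear down outside it */
mutex_lock(&pf->adev_mutex);
adev = pf->adev;
pf->adev = NULL;
mutex_unlock(&pf->adev_mutex);
if (adev) {
	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);
}
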
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index b897926f817d..454e01ae09b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1480,6 +1480,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->tx_tstamps = &pf->ptp.port.tx;
ring->dev = dev;
ring->count = vsi->num_tx_desc;
+ ring->txq_teid = ICE_INVAL_TEID;
if (dvm_ena)
ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
else
@@ -2688,6 +2689,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
+ ice_free_cpu_rx_rmap(vsi);
+
ice_for_each_q_vector(vsi, i) {
u16 vector = i + base;
int irq_num;
@@ -2701,7 +2704,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
continue;
/* clear the affinity notifier in the IRQ descriptor */
- irq_set_affinity_notifier(irq_num, NULL);
+ if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+ irq_set_affinity_notifier(irq_num, NULL);
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(irq_num, NULL);
@@ -2983,6 +2987,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
+ if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
+ ice_clear_dflt_vsi(pf->first_sw);
ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
@@ -3037,8 +3043,8 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
- coalesce[i].itr_tx = q_vector->tx.itr_setting;
- coalesce[i].itr_rx = q_vector->rx.itr_setting;
+ coalesce[i].itr_tx = q_vector->tx.itr_settings;
+ coalesce[i].itr_rx = q_vector->rx.itr_settings;
coalesce[i].intrl = q_vector->intrl;
if (i < vsi->num_txq)
@@ -3094,21 +3100,21 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
*/
if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[i].itr_rx;
+ rc->itr_settings = coalesce[i].itr_rx;
ice_write_itr(rc, rc->itr_setting);
} else if (i < vsi->alloc_rxq) {
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[0].itr_rx;
+ rc->itr_settings = coalesce[0].itr_rx;
ice_write_itr(rc, rc->itr_setting);
}
if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[i].itr_tx;
+ rc->itr_settings = coalesce[i].itr_tx;
ice_write_itr(rc, rc->itr_setting);
} else if (i < vsi->alloc_txq) {
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[0].itr_tx;
+ rc->itr_settings = coalesce[0].itr_tx;
ice_write_itr(rc, rc->itr_setting);
}
@@ -3122,12 +3128,12 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
for (; i < vsi->num_q_vectors; i++) {
/* transmit */
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[0].itr_tx;
+ rc->itr_settings = coalesce[0].itr_tx;
ice_write_itr(rc, rc->itr_setting);
/* receive */
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[0].itr_rx;
+ rc->itr_settings = coalesce[0].itr_rx;
ice_write_itr(rc, rc->itr_setting);
vsi->q_vectors[i]->intrl = coalesce[0].intrl;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index b588d7995631..963a5f40e071 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -243,8 +243,7 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
- test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
- test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
+ test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}
/**
@@ -260,10 +259,15 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
if (vsi->type != ICE_VSI_PF)
return 0;
- if (ice_vsi_has_non_zero_vlans(vsi))
- status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
- else
- status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+ if (ice_vsi_has_non_zero_vlans(vsi)) {
+ promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
+ status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
+ promisc_m);
+ } else {
+ status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+ promisc_m, 0);
+ }
+
return status;
}
@@ -280,10 +284,15 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
if (vsi->type != ICE_VSI_PF)
return 0;
- if (ice_vsi_has_non_zero_vlans(vsi))
- status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
- else
- status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+ if (ice_vsi_has_non_zero_vlans(vsi)) {
+ promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
+ status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
+ promisc_m);
+ } else {
+ status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ promisc_m, 0);
+ }
+
return status;
}
@@ -302,7 +311,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u32 changed_flags = 0;
- u8 promisc_m;
int err;
if (!vsi->netdev)
@@ -320,7 +328,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
if (ice_vsi_fltr_changed(vsi)) {
clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
- clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
/* grab the netdev's addr_list_lock */
netif_addr_lock_bh(netdev);
@@ -369,29 +376,15 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
if (vsi->current_netdev_flags & IFF_ALLMULTI) {
- if (ice_vsi_has_non_zero_vlans(vsi))
- promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_MCAST_PROMISC_BITS;
-
- err = ice_set_promisc(vsi, promisc_m);
+ err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
if (err) {
- netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
- vsi->vsi_num);
vsi->current_netdev_flags &= ~IFF_ALLMULTI;
goto out_promisc;
}
} else {
/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
- if (ice_vsi_has_non_zero_vlans(vsi))
- promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_MCAST_PROMISC_BITS;
-
- err = ice_clear_promisc(vsi, promisc_m);
+ err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
if (err) {
- netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
- vsi->vsi_num);
vsi->current_netdev_flags |= IFF_ALLMULTI;
goto out_promisc;
}
@@ -2517,6 +2510,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
}
+ err = ice_set_cpu_rx_rmap(vsi);
+ if (err) {
+ netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
+ vsi->vsi_num, ERR_PTR(err));
+ goto free_q_irqs;
+ }
+
vsi->irqs_ready = true;
return 0;
@@ -2569,7 +2569,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
spin_lock_init(&xdp_ring->tx_lock);
for (j = 0; j < xdp_ring->count; j++) {
tx_desc = ICE_TX_DESC(xdp_ring, j);
- tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
+ tx_desc->cmd_type_offset_bsz = 0;
}
}
@@ -2765,8 +2765,10 @@ free_qmap:
ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
- if (vsi->xdp_rings[i]->desc)
+ if (vsi->xdp_rings[i]->desc) {
+ synchronize_rcu();
ice_free_tx_ring(vsi->xdp_rings[i]);
+ }
kfree_rcu(vsi->xdp_rings[i], rcu);
vsi->xdp_rings[i] = NULL;
}
@@ -3488,6 +3490,20 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
+ while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
+ usleep_range(1000, 2000);
+
+ /* Add a multicast promisc rule for the VLAN ID being added if
+ * all-multicast is currently enabled.
+ */
+ if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+ ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS,
+ vid);
+ if (ret)
+ goto finish;
+ }
+
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
@@ -3495,8 +3511,23 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
*/
vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
ret = vlan_ops->add_vlan(vsi, &vlan);
- if (!ret)
- set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
+ if (ret)
+ goto finish;
+
+ /* If all-multicast is currently enabled and this VLAN ID is the only
+ * one besides VLAN 0, we have to update the look-up type of the multicast
+ * promisc rule for VLAN 0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
+ */
+ if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
+ ice_vsi_num_non_zero_vlans(vsi) == 1) {
+ ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_PROMISC_BITS, 0);
+ ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, 0);
+ }
+
+finish:
+ clear_bit(ICE_CFG_BUSY, vsi->state);
return ret;
}
@@ -3522,6 +3553,9 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
+ while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
+ usleep_range(1000, 2000);
+
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
/* Make sure VLAN delete is successful before updating VLAN
@@ -3530,10 +3564,33 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
ret = vlan_ops->del_vlan(vsi, &vlan);
if (ret)
- return ret;
+ goto finish;
- set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
- return 0;
+ /* Remove the multicast promisc rule for the removed VLAN ID if
+ * all-multicast is enabled.
+ */
+ if (vsi->current_netdev_flags & IFF_ALLMULTI)
+ ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, vid);
+
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ /* Update look-up type of multicast promisc rule for VLAN 0
+ * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
+ * all-multicast is enabled and VLAN 0 is the only VLAN rule.
+ */
+ if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+ ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS,
+ 0);
+ ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_PROMISC_BITS, 0);
+ }
+ }
+
+finish:
+ clear_bit(ICE_CFG_BUSY, vsi->state);
+
+ return ret;
}
/**
@@ -3642,20 +3699,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
*/
ice_napi_add(vsi);
- status = ice_set_cpu_rx_rmap(vsi);
- if (status) {
- dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
- vsi->vsi_num, status);
- goto unroll_napi_add;
- }
status = ice_init_mac_fltr(pf);
if (status)
- goto free_cpu_rx_map;
+ goto unroll_napi_add;
return 0;
-free_cpu_rx_map:
- ice_free_cpu_rx_rmap(vsi);
unroll_napi_add:
ice_tc_indir_block_unregister(vsi);
unroll_cfg_netdev:
@@ -3720,6 +3769,7 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
static void ice_deinit_pf(struct ice_pf *pf)
{
ice_service_task_stop(pf);
+ mutex_destroy(&pf->adev_mutex);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->tc_mutex);
mutex_destroy(&pf->avail_q_mutex);
@@ -3798,6 +3848,7 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
+ mutex_init(&pf->adev_mutex);
INIT_HLIST_HEAD(&pf->aq_wait_list);
spin_lock_init(&pf->aq_wait_lock);
@@ -5117,7 +5168,6 @@ static int __maybe_unused ice_suspend(struct device *dev)
continue;
ice_vsi_free_q_vectors(pf->vsi[v]);
}
- ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
ice_clear_interrupt_scheme(pf);
pci_save_state(pdev);
@@ -5475,16 +5525,19 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
/* Add filter for new MAC. If filter exists, return success */
err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
- if (err == -EEXIST)
+ if (err == -EEXIST) {
/* Although this MAC filter is already present in hardware it's
* possible in some cases (e.g. bonding) that dev_addr was
* modified outside of the driver and needs to be restored back
* to this value.
*/
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
- else if (err)
+
+ return 0;
+ } else if (err) {
/* error if the new filter addition failed */
err = -EADDRNOTAVAIL;
+ }
err_update_filters:
if (err) {
@@ -6119,9 +6172,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_ptp_link_change(pf, pf->hw.pf_id, true);
}
- /* clear this now, and the first stats read will be used as baseline */
- vsi->stat_offsets_loaded = false;
-
+ /* Perform an initial read of the statistics registers now to
+ * set the baseline so counters are ready when the interface is up
+ */
+ ice_update_eth_stats(vsi);
ice_service_task_schedule(pf);
return 0;
@@ -6878,12 +6932,15 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
+#define ICE_EMP_RESET_SLEEP_MS 5000
if (reset_type == ICE_RESET_EMPR) {
/* If an EMP reset has occurred, any previously pending flash
* update will have completed. We no longer know whether or
* not the NVM update EMP reset is restricted.
*/
pf->fw_emp_reset_disabled = false;
+
+ msleep(ICE_EMP_RESET_SLEEP_MS);
}
err = ice_init_all_ctrlq(hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 4eb0599714f4..13cdb5ea594d 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -641,6 +641,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
orom_data, hw->flash.banks.orom_size);
if (status) {
+ vfree(orom_data);
ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index a1cd33273ca4..662947c882e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -500,12 +500,19 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
* This function must be called periodically to ensure that the cached value
* is never more than 2 seconds old. It must also be called whenever the PHC
* time has been changed.
+ *
+ * Return:
+ * * 0 - OK, successfully updated
+ * * -EAGAIN - PF was busy, need to reschedule the update
*/
-static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
+static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
u64 systime;
int i;
+ if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
+ return -EAGAIN;
+
/* Read the current PHC time */
systime = ice_ptp_read_src_clk_reg(pf, NULL);
@@ -528,6 +535,9 @@ static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
}
}
+ clear_bit(ICE_CFG_BUSY, pf->state);
+
+ return 0;
}
/**
@@ -2287,6 +2297,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
/**
* ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
+ * @hw: pointer to the hw struct
* @tx: PTP Tx tracker to clean up
*
* Loop through the Tx timestamp requests and see if any of them have been
@@ -2295,7 +2306,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
* timestamp will never be captured. This might happen if the packet gets
* discarded before it reaches the PHY timestamping block.
*/
-static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
+static void ice_ptp_tx_tstamp_cleanup(struct ice_hw *hw, struct ice_ptp_tx *tx)
{
u8 idx;
@@ -2304,11 +2315,16 @@ static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
for_each_set_bit(idx, tx->in_use, tx->len) {
struct sk_buff *skb;
+ u64 raw_tstamp;
/* Check if this SKB has been waiting for too long */
if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
continue;
+ /* Read tstamp to be able to use this register again */
+ ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
+ &raw_tstamp);
+
spin_lock(&tx->lock);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
@@ -2324,17 +2340,18 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
{
struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+ int err;
if (!test_bit(ICE_FLAG_PTP, pf->flags))
return;
- ice_ptp_update_cached_phctime(pf);
+ err = ice_ptp_update_cached_phctime(pf);
- ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);
+ ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
- /* Run twice a second */
+ /* Run twice a second or reschedule if phc update failed */
kthread_queue_delayed_work(ptp->kworker, &ptp->work,
- msecs_to_jiffies(500));
+ msecs_to_jiffies(err ? 10 : 500));
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 8915a9d39e36..0c438219f7a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1046,8 +1046,8 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!num_vfs) {
if (!pci_vfs_assigned(pdev)) {
- ice_mbx_deinit_snapshot(&pf->hw);
ice_free_vfs(pf);
+ ice_mbx_deinit_snapshot(&pf->hw);
if (pf->lag)
ice_enable_lag(pf->lag);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index cead3eb149bd..ffb3f6a589da 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -384,9 +384,14 @@ struct ice_ring_container {
/* this matches the maximum number of ITR bits, but in usec
* values, so it is shifted left one bit (bit zero is ignored)
*/
- u16 itr_setting:13;
- u16 itr_reserved:2;
- u16 itr_mode:1;
+ union {
+ struct {
+ u16 itr_setting:13;
+ u16 itr_reserved:2;
+ u16 itr_mode:1;
+ };
+ u16 itr_settings;
+ };
enum ice_container_type type;
};
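
Wrapping the ITR bitfields in an anonymous union lets the rebuild path above save and restore all three fields with a single 16-bit assignment, instead of copying itr_setting alone and losing itr_mode. A small standalone illustration of the idiom (C11 anonymous struct):

#include <stdio.h>

/* Sketch: one backing u16 aliases the packed bitfields, so a single
 * assignment saves/restores all of them at once.
 */
union itr {
	struct {
		unsigned short setting:13;
		unsigned short reserved:2;
		unsigned short mode:1;
	};
	unsigned short all;
};

int main(void)
{
	union itr vec = { .all = 0 }, saved;

	vec.setting = 42;
	vec.mode = 1;
	saved.all = vec.all;	/* captures setting, reserved and mode */
	printf("%u %u\n", saved.setting, saved.mode);	/* 42 1 */
	return 0;
}
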
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 3f1a63815bac..2889e050a4c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -1308,12 +1308,51 @@ error_param:
}
/**
+ * ice_vf_vsi_dis_single_txq - disable a single Tx queue
+ * @vf: VF to disable queue for
+ * @vsi: VSI for the VF
+ * @q_id: VF relative (0-based) queue ID
+ *
+ * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
+ * disabled, clear the q_id bit in the enabled-queues bitmap and return
+ * success. Otherwise, return an error.
+ */
+static int
+ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
+{
+ struct ice_txq_meta txq_meta = { 0 };
+ struct ice_tx_ring *ring;
+ int err;
+
+ if (!test_bit(q_id, vf->txq_ena))
+ dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+ q_id, vsi->vsi_num);
+
+ ring = vsi->tx_rings[q_id];
+ if (!ring)
+ return -EINVAL;
+
+ ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+ q_id, vsi->vsi_num);
+ return err;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(q_id, vf->txq_ena);
+
+ return 0;
+}
+
+/**
* ice_vc_dis_qs_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * called from the VF to disable all or specific
- * queue(s)
+ * called from the VF to disable all or specific queue(s)
*/
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{
@@ -1350,30 +1389,15 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
- struct ice_txq_meta txq_meta = { 0 };
-
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- /* Skip queue if not enabled */
- if (!test_bit(vf_q_id, vf->txq_ena))
- continue;
-
- ice_fill_txq_meta(vsi, ring, &txq_meta);
-
- if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
- ring, &txq_meta)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
-
- /* Clear enabled queues flag */
- clear_bit(vf_q_id, vf->txq_ena);
}
}
@@ -1622,6 +1646,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
if (qpi->txq.ring_len > 0) {
vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
vsi->tx_rings[i]->count = qpi->txq.ring_len;
+
+ /* Disable any existing queue first */
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Configure a queue with the requested settings */
if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -3625,6 +3657,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
return;
}
+ mutex_lock(&vf->cfg_lock);
+
/* Check if VF is disabled. */
if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
err = -EPERM;
@@ -3642,32 +3676,20 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
err = -EINVAL;
}
- if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
- ice_vc_send_msg_to_vf(vf, v_opcode,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
- 0);
- ice_put_vf(vf);
- return;
- }
-
error_handler:
if (err) {
ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
NULL, 0);
dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
vf_id, v_opcode, msglen, err);
- ice_put_vf(vf);
- return;
+ goto finish;
}
- /* VF is being configured in another context that triggers a VFR, so no
- * need to process this message
- */
- if (!mutex_trylock(&vf->cfg_lock)) {
- dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
- vf->vf_id);
- ice_put_vf(vf);
- return;
+ if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
+ ice_vc_send_msg_to_vf(vf, v_opcode,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
+ 0);
+ goto finish;
}
switch (v_opcode) {
@@ -3780,6 +3802,7 @@ error_handler:
vf_id, v_opcode, err);
}
+finish:
mutex_unlock(&vf->cfg_lock);
ice_put_vf(vf);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index dfbcaf08520e..9dd38f667059 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -41,8 +41,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
ice_clean_tx_ring(vsi->tx_rings[q_idx]);
- if (ice_is_xdp_ena_vsi(vsi))
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ synchronize_rcu();
ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+ }
ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}
@@ -413,8 +415,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
*/
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
+ u32 nb_buffs_extra = 0, nb_buffs = 0;
union ice_32b_rx_flex_desc *rx_desc;
- u32 nb_buffs_extra = 0, nb_buffs;
u16 ntu = rx_ring->next_to_use;
u16 total_count = count;
struct xdp_buff **xdp;
@@ -426,6 +428,10 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
rx_desc,
rx_ring->count - ntu);
+ if (nb_buffs_extra != rx_ring->count - ntu) {
+ ntu += nb_buffs_extra;
+ goto exit;
+ }
rx_desc = ICE_RX_DESC(rx_ring, 0);
xdp = ice_xdp_buf(rx_ring, 0);
ntu = 0;
@@ -439,6 +445,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
if (ntu == rx_ring->count)
ntu = 0;
+exit:
if (rx_ring->next_to_use != ntu)
ice_release_rx_desc(rx_ring, ntu);
@@ -918,7 +925,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
struct ice_vsi *vsi = np->vsi;
struct ice_tx_ring *ring;
- if (test_bit(ICE_DOWN, vsi->state))
+ if (test_bit(ICE_VSI_DOWN, vsi->state))
return -ENETDOWN;
if (!ice_is_xdp_ena_vsi(vsi))
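
The __ice_alloc_rx_bufs_zc() change guards the wrap-around: if the tail chunk of the ring could not be fully filled because the XSK pool ran dry, the fill stops there instead of wrapping to index 0 past unfilled slots. A sketch of that two-phase fill, where fill_descs() is an illustrative stand-in for the pool allocator that returns how many descriptors it actually produced:

/* Sketch: refuse to wrap past a partial tail fill. */
static unsigned int example_fill(unsigned int ntu, unsigned int count,
				 unsigned int ring_count)
{
	unsigned int done;

	if (ntu + count >= ring_count) {
		done = fill_descs(ntu, ring_count - ntu);
		if (done != ring_count - ntu)
			return ntu + done;	/* pool dry: stop, do not wrap */
		count -= done;
		ntu = 0;			/* tail fully filled; wrap */
	}
	return ntu + fill_descs(ntu, count);
}
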
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 34b33b21e0dc..68be2976f539 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5505,7 +5505,8 @@ static void igb_watchdog_task(struct work_struct *work)
break;
}
- if (adapter->link_speed != SPEED_1000)
+ if (adapter->link_speed != SPEED_1000 ||
+ !hw->phy.ops.read_reg)
goto no_wait;
/* wait for Remote receiver status OK */
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 66ea566488d1..59d5c467ea6e 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igc_get_hw_semaphore_i225(hw))
- ; /* Empty */
+ /* Releasing the resource requires first getting the HW semaphore.
+ * If we fail to get the semaphore, there is nothing we can do,
+ * except log an error and quit. We are not allowed to hang here
+ * indefinitely, as it may cause denial of service or system crash.
+ */
+ if (igc_get_hw_semaphore_i225(hw)) {
+ hw_dbg("Failed to release SW_FW_SYNC.\n");
+ return;
+ }
swfw_sync = rd32(IGC_SW_FW_SYNC);
swfw_sync &= ~mask;
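
The igc change replaces an unbounded spin with a single bounded attempt, the safer shape for any release path that itself needs a hardware resource. A hedged sketch of that shape (the helpers are illustrative stand-ins for the driver's semaphore accessors):

/* Sketch: a release path must not loop forever on acquisition failure;
 * log and bail so a wedged device cannot stall the CPU.
 */
static void example_release_sync(struct hw *hw, u32 mask)
{
	if (get_hw_semaphore(hw)) {
		hw_dbg("Failed to release SW_FW_SYNC.\n");
		return;		/* give up rather than spin forever */
	}
	clear_sync_bits(hw, mask);
	put_hw_semaphore(hw);
}
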
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 40dbf4b43234..6961f65d36b9 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -581,7 +581,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
- usleep_range(500, 1000);
+ udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
@@ -638,7 +638,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
- usleep_range(500, 1000);
+ udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 0d6e3215e98f..653e9f1e35b5 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -992,6 +992,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter)
igc_ptp_write_i225(adapter, &ts);
}
+static void igc_ptm_stop(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl &= ~IGC_PTM_CTRL_EN;
+
+ wr32(IGC_PTM_CTRL, ctrl);
+}
+
/**
* igc_ptp_suspend - Disable PTP work items and prepare for suspend
* @adapter: Board private structure
@@ -1009,8 +1020,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
- if (pci_device_is_present(adapter->pdev))
+ if (pci_device_is_present(adapter->pdev)) {
igc_ptp_time_save(adapter);
+ igc_ptm_stop(adapter);
+ }
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index e596e1a9fc75..69d11ff7677d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -903,7 +903,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* Tx IPsec offload doesn't seem to work on this
* device, so block these requests for now.
*/
- if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) {
+ sam->flags = sam->flags & ~XFRM_OFFLOAD_IPV6;
+ if (sam->flags != XFRM_OFFLOAD_INBOUND) {
err = -EOPNOTSUPP;
goto err_out;
}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 5f9ab1842d49..c18801490649 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2751,7 +2751,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
}
ret = of_get_mac_address(pnp, ppd.mac_addr);
- if (ret)
+ if (ret == -EPROBE_DEFER)
return ret;
mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 3ad10c793308..66298e2235c9 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -395,7 +395,7 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
int i, k;
- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table));
+ memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
if (!IS_ENABLED(CONFIG_SOC_MT7621))
return;
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index 32d83421226a..5897940a418b 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -26,6 +26,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
break;
ss->regmap[i] = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(ss->regmap[i]))
return PTR_ERR(ss->regmap[i]);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
index 538adab6878b..c5b560a8b026 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
@@ -31,6 +31,7 @@ static const char *const mlx5_rsc_sgmt_name[] = {
struct mlx5_rsc_dump {
u32 pdn;
u32 mkey;
+ u32 number_of_menu_items;
u16 fw_segment_type[MLX5_SGMT_TYPE_NUM];
};
@@ -50,21 +51,37 @@ static int mlx5_rsc_dump_sgmt_get_by_name(char *name)
return -EINVAL;
}
-static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page)
+#define MLX5_RSC_DUMP_MENU_HEADER_SIZE (MLX5_ST_SZ_BYTES(resource_dump_info_segment) + \
+ MLX5_ST_SZ_BYTES(resource_dump_command_segment) + \
+ MLX5_ST_SZ_BYTES(resource_dump_menu_segment))
+
+static int mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page,
+ int read_size, int start_idx)
{
void *data = page_address(page);
enum mlx5_sgmt_type sgmt_idx;
int num_of_items;
char *sgmt_name;
void *member;
+ int size = 0;
void *menu;
int i;
- menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
- num_of_items = MLX5_GET(resource_dump_menu_segment, menu, num_of_records);
+ if (!start_idx) {
+ menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
+ rsc_dump->number_of_menu_items = MLX5_GET(resource_dump_menu_segment, menu,
+ num_of_records);
+ size = MLX5_RSC_DUMP_MENU_HEADER_SIZE;
+ data += size;
+ }
+ num_of_items = rsc_dump->number_of_menu_items;
+
+ for (i = 0; start_idx + i < num_of_items; i++) {
+ size += MLX5_ST_SZ_BYTES(resource_dump_menu_record);
+ if (size >= read_size)
+ return start_idx + i;
- for (i = 0; i < num_of_items; i++) {
- member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]);
+ member = data + MLX5_ST_SZ_BYTES(resource_dump_menu_record) * i;
sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name);
sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name);
if (sgmt_idx == -EINVAL)
@@ -72,6 +89,7 @@ static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct
rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record,
member, segment_type);
}
+ return 0;
}
static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
@@ -168,6 +186,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
struct mlx5_rsc_dump_cmd *cmd = NULL;
struct mlx5_rsc_key key = {};
struct page *page;
+ int start_idx = 0;
int size;
int err;
@@ -189,7 +208,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
if (err < 0)
goto destroy_cmd;
- mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page);
+ start_idx = mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page, size, start_idx);
} while (err > 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 673f1c82d381..c9d5d8d93994 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -309,8 +309,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
- err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz,
- xoff, &port_buffer, &update_buffer);
+ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff,
+ port_buff_cell_sz, &port_buffer, &update_buffer);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index af37a8d247a1..2755c25ba324 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -145,8 +145,7 @@ mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
flow_action_for_each(i, act, flow_action) {
tc_act = mlx5e_tc_act_get(act->id, ns_type);
- if (!tc_act || !tc_act->post_parse ||
- !tc_act->can_offload(parse_state, act, i, attr))
+ if (!tc_act || !tc_act->post_parse)
continue;
err = tc_act->post_parse(parse_state, priv, attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index b9d38fe807df..a829c94289c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -45,12 +45,41 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
if (mlx5e_is_eswitch_flow(parse_state->flow))
attr->esw_attr->split_count = attr->esw_attr->out_count;
- if (!clear_action) {
+ if (clear_action) {
+ parse_state->ct_clear = true;
+ } else {
attr->flags |= MLX5_ATTR_FLAG_CT;
flow_flag_set(parse_state->flow, CT);
parse_state->ct = true;
}
- parse_state->ct_clear = clear_action;
+
+ return 0;
+}
+
+static int
+tc_act_post_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_tc_mod_hdr_acts *mod_acts = &attr->parse_attr->mod_hdr_acts;
+ int err;
+
+ /* If a ct action exists, we can ignore previous ct_clear actions */
+ if (parse_state->ct)
+ return 0;
+
+ if (parse_state->ct_clear) {
+ err = mlx5_tc_ct_set_ct_clear_regs(parse_state->ct_priv, mod_acts);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(parse_state->extack,
+ "Failed to set registers for ct clear");
+ return err;
+ }
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ /* Prevent handling of additional, redundant clear actions */
+ parse_state->ct_clear = false;
+ }
return 0;
}
@@ -70,5 +99,6 @@ struct mlx5e_tc_act mlx5e_tc_act_ct = {
.can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
.is_multi_table_act = tc_act_is_multi_table_act_ct,
+ .post_parse = tc_act_post_parse_ct,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 59988e24b704..bec9ed0103a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -23,7 +23,7 @@ struct mlx5_ct_fs_smfs_matcher {
};
struct mlx5_ct_fs_smfs_matchers {
- struct mlx5_ct_fs_smfs_matcher smfs_matchers[4];
+ struct mlx5_ct_fs_smfs_matcher smfs_matchers[6];
struct list_head used;
};
@@ -44,7 +44,8 @@ struct mlx5_ct_fs_smfs_rule {
};
static inline void
-mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp)
+mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp,
+ bool gre)
{
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
@@ -77,7 +78,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
ntohs(MLX5_CT_TCP_FLAGS_MASK));
- } else {
+ } else if (!gre) {
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
}
@@ -87,7 +88,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo
static struct mlx5dr_matcher *
mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
- bool tcp, u32 priority)
+ bool tcp, bool gre, u32 priority)
{
struct mlx5dr_matcher *dr_matcher;
struct mlx5_flow_spec *spec;
@@ -96,7 +97,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
if (!spec)
return ERR_PTR(-ENOMEM);
- mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp);
+ mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp, gre);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
@@ -108,7 +109,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
}
static struct mlx5_ct_fs_smfs_matcher *
-mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp)
+mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp, bool gre)
{
struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
@@ -119,7 +120,7 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
int prio;
matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
- smfs_matcher = &matchers->smfs_matchers[ipv4 * 2 + tcp];
+ smfs_matcher = &matchers->smfs_matchers[ipv4 * 3 + tcp * 2 + gre];
if (refcount_inc_not_zero(&smfs_matcher->ref))
return smfs_matcher;
@@ -145,11 +146,11 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
}
tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
- dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, prio);
+ dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
if (IS_ERR(dr_matcher)) {
netdev_warn(fs->netdev,
- "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d), err: %ld\n",
- nat, ipv4, tcp, PTR_ERR(dr_matcher));
+ "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
+ nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));
smfs_matcher = ERR_CAST(dr_matcher);
goto out_unlock;
@@ -222,16 +223,17 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
static inline bool
mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
{
-#define DISSECTOR_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
- const u32 basic_keys = DISSECTOR_BIT(BASIC) | DISSECTOR_BIT(CONTROL) |
- DISSECTOR_BIT(PORTS) | DISSECTOR_BIT(META);
- const u32 ipv4_tcp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS) | DISSECTOR_BIT(TCP);
- const u32 ipv4_udp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS);
- const u32 ipv6_tcp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS) | DISSECTOR_BIT(TCP);
- const u32 ipv6_udp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS);
+#define DISS_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
+ const u32 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | DISS_BIT(META);
+ const u32 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u32 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u32 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS);
+ const u32 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS);
+ const u32 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
+ const u32 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
- used_keys == ipv6_udp);
+ used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
}
static bool
@@ -254,20 +256,24 @@ mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *f
flow_rule_match_control(flow_rule, &control);
flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
- flow_rule_match_ports(flow_rule, &ports);
- flow_rule_match_tcp(flow_rule, &tcp);
+ if (basic.key->ip_proto != IPPROTO_GRE)
+ flow_rule_match_ports(flow_rule, &ports);
+ if (basic.key->ip_proto == IPPROTO_TCP)
+ flow_rule_match_tcp(flow_rule, &tcp);
if (basic.mask->n_proto != htons(0xFFFF) ||
(basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
basic.mask->ip_proto != 0xFF ||
- (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP)) {
+ (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
+ basic.key->ip_proto != IPPROTO_GRE)) {
ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
basic.key->ip_proto, basic.mask->ip_proto);
return false;
}
- if (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF)) {
+ if (basic.key->ip_proto != IPPROTO_GRE &&
+ (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
ports.mask->src, ports.mask->dst);
return false;
@@ -291,7 +297,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
struct mlx5dr_action *actions[5];
struct mlx5dr_rule *rule;
int num_actions = 0, err;
- bool nat, tcp, ipv4;
+ bool nat, tcp, ipv4, gre;
if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
return ERR_PTR(-EOPNOTSUPP);
@@ -314,15 +320,17 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
tcp = MLX5_GET(fte_match_param, spec->match_value,
outer_headers.ip_protocol) == IPPROTO_TCP;
+ gre = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_GRE;
- smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp);
+ smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp, gre);
if (IS_ERR(smfs_matcher)) {
err = PTR_ERR(smfs_matcher);
goto err_matcher;
}
rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
- MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT);
+ spec->flow_context.flow_source);
if (!rule) {
err = -EINVAL;
goto err_create;
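
With GRE added, the matcher array grows to six slots, and the index expression ipv4 * 3 + tcp * 2 + gre maps each (IP version, protocol) pair to a unique slot because tcp and gre are mutually exclusive. Spelled out as a standalone helper:

#include <stdbool.h>

/* Sketch: slot layout produced by ipv4 * 3 + tcp * 2 + gre
 * ("neither tcp nor gre" is the udp case):
 *   ipv6/udp -> 0   ipv6/gre -> 1   ipv6/tcp -> 2
 *   ipv4/udp -> 3   ipv4/gre -> 4   ipv4/tcp -> 5
 */
static unsigned int smfs_matcher_idx(bool ipv4, bool tcp, bool gre)
{
	return ipv4 * 3 + tcp * 2 + gre;
}
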
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index e49f51124c74..ab4b0f3ee2a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -582,6 +582,12 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
return 0;
}
+int mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
+{
+ return mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0);
+}
+
static int
mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act,
char *modact)
@@ -1410,9 +1416,6 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
- bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
- int err;
-
if (!priv) {
NL_SET_ERR_MSG_MOD(extack,
"offload of ct action isn't available");
@@ -1423,17 +1426,6 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
attr->ct_attr.ct_action = act->ct.action;
attr->ct_attr.nf_ft = act->ct.flow_table;
- if (!clear_action)
- goto out;
-
- err = mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to set registers for ct clear");
- return err;
- }
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-
-out:
return 0;
}
@@ -1749,6 +1741,8 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
static void
mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
{
+ struct mlx5e_priv *priv;
+
if (!refcount_dec_and_test(&ft->refcount))
return;
@@ -1758,6 +1752,8 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
rhashtable_free_and_destroy(&ft->ct_entries_ht,
mlx5_tc_ct_flush_ft_entry,
ct_priv);
+ priv = netdev_priv(ct_priv->netdev);
+ flush_workqueue(priv->wq);
mlx5_tc_ct_free_pre_ct_tables(ft);
mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
kfree(ft);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 36d3652bf829..00a3ba862afb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -129,6 +129,10 @@ bool
mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id);
+int
+mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts);
+
#else /* CONFIG_MLX5_TC_CT */
static inline struct mlx5_tc_ct_priv *
@@ -171,6 +175,13 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
}
static inline int
+mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_acts,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 378fc8e3bd97..d87bbb0be7c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -713,6 +713,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
struct net_device *filter_dev)
{
struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_int_port *int_port;
TC_TUN_ROUTE_ATTR_INIT(attr);
u16 vport_num;
@@ -747,7 +748,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
misc_parameters.vxlan_vni);
esw_attr->rx_tun_attr->decap_vport = vport_num;
- } else if (netif_is_ovs_master(attr.route_dev)) {
+ } else if (netif_is_ovs_master(attr.route_dev) && mlx5e_tc_int_port_supported(esw)) {
int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
attr.route_dev->ifindex,
MLX5E_TC_INT_PORT_INGRESS);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index a55b066746cb..857840ab1e91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -14,19 +14,26 @@ static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
bool busy = false;
int work_done = 0;
+ rcu_read_lock();
+
ch_stats->poll++;
work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
busy |= work_done == budget;
busy |= rq->post_wqes(rq);
- if (busy)
- return budget;
+ if (busy) {
+ work_done = budget;
+ goto out;
+ }
if (unlikely(!napi_complete_done(napi, work_done)))
- return work_done;
+ goto out;
mlx5e_cq_arm(&rq->cq);
+
+out:
+ rcu_read_unlock();
return work_done;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index d659fe07d464..8ead2c82a52a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1200,6 +1200,16 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
return err;
WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state);
+ if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) {
+		/* Align the driver state with the register state.
+		 * Temporary state change is required to enable the app list reset.
+		 */
+ priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP;
+ mlx5e_dcbnl_delete_app(priv);
+ priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
+ }
+
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
priv->dcbx_dp.trust_state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2f1dedc721d1..fa229998606c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3864,6 +3864,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_NTUPLE)
netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
+ features &= ~NETIF_F_GRO_HW;
+ if (netdev->features & NETIF_F_GRO_HW)
+ netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
+
return features;
}
@@ -3896,6 +3900,25 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
}
+ if (params->xdp_prog) {
+ if (features & NETIF_F_LRO) {
+ netdev_warn(netdev, "LRO is incompatible with XDP\n");
+ features &= ~NETIF_F_LRO;
+ }
+ if (features & NETIF_F_GRO_HW) {
+ netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
+ features &= ~NETIF_F_GRO_HW;
+ }
+ }
+
+ if (priv->xsk.refcnt) {
+ if (features & NETIF_F_GRO_HW) {
+ netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
+ priv->xsk.refcnt);
+ features &= ~NETIF_F_GRO_HW;
+ }
+ }
+
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
features &= ~NETIF_F_RXHASH;
if (netdev->features & NETIF_F_RXHASH)
@@ -4850,10 +4873,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- if (!!MLX5_CAP_GEN(mdev, shampo) &&
- mlx5e_check_fragmented_striding_rq_cap(mdev))
- netdev->hw_features |= NETIF_F_GRO_HW;
-
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
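The en_main.c hunks above all apply one pattern in ndo_fix_features-style code: when a runtime state (switchdev mode, an attached XDP program, active AF_XDP sockets) cannot coexist with an offload, the feature bit is masked off and a warning is logged rather than failing the operation. A hedged, self-contained C sketch of that pattern, with illustrative flag names rather than the real NETIF_F_* definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_LRO    (1u << 0)
#define F_GRO_HW (1u << 1)

/* Mask off features that cannot coexist with the current runtime state,
 * warning once per dropped feature.
 */
static uint32_t fix_features(uint32_t features, bool xdp_active, int xsk_refcnt)
{
	if (xdp_active) {
		if (features & F_LRO) {
			fprintf(stderr, "LRO is incompatible with XDP, disabling\n");
			features &= ~F_LRO;
		}
		if (features & F_GRO_HW) {
			fprintf(stderr, "HW GRO is incompatible with XDP, disabling\n");
			features &= ~F_GRO_HW;
		}
	}
	if (xsk_refcnt && (features & F_GRO_HW)) {
		fprintf(stderr, "HW GRO is incompatible with AF_XDP (%d XSKs), disabling\n",
			xsk_refcnt);
		features &= ~F_GRO_HW;
	}
	return features;
}

int main(void)
{
	uint32_t f = fix_features(F_LRO | F_GRO_HW, true, 2);

	printf("remaining features: 0x%x\n", f);
	return 0;
}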
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index e3fc15ae7bb1..ac0f73074f7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2459,6 +2459,17 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
+ match.mask->vlan_eth_type &&
+ MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
+ ft_field_support.outer_second_vid,
+ fs_type)) {
+ MLX5_SET(fte_match_set_misc, misc_c,
+ outer_second_cvlan_tag, 1);
+ spec->match_criteria_enable |=
+ MLX5_MATCH_MISC_PARAMETERS;
+ }
}
} else if (*match_level != MLX5_MATCH_NONE) {
/* cvlan_tag enabled in match criteria and
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 3f63df127091..3b151332e2f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -139,7 +139,7 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
if (mlx5_esw_indir_table_decap_vport(attr))
vport = mlx5_esw_indir_table_decap_vport(attr);
- if (esw_attr->int_port)
+ if (attr && !attr->chain && esw_attr->int_port)
metadata =
mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 816d991f7621..3ad67e6b5586 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2663,28 +2663,6 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
clean_tree(&root_ns->ns.node);
}
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
-{
- struct mlx5_flow_steering *steering = dev->priv.steering;
-
- cleanup_root_ns(steering->root_ns);
- cleanup_root_ns(steering->fdb_root_ns);
- steering->fdb_root_ns = NULL;
- kfree(steering->fdb_sub_ns);
- steering->fdb_sub_ns = NULL;
- cleanup_root_ns(steering->port_sel_root_ns);
- cleanup_root_ns(steering->sniffer_rx_root_ns);
- cleanup_root_ns(steering->sniffer_tx_root_ns);
- cleanup_root_ns(steering->rdma_rx_root_ns);
- cleanup_root_ns(steering->rdma_tx_root_ns);
- cleanup_root_ns(steering->egress_root_ns);
- mlx5_cleanup_fc_stats(dev);
- kmem_cache_destroy(steering->ftes_cache);
- kmem_cache_destroy(steering->fgs_cache);
- mlx5_ft_pool_destroy(dev);
- kfree(steering);
-}
-
static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
@@ -3086,42 +3064,27 @@ cleanup:
return err;
}
-int mlx5_init_fs(struct mlx5_core_dev *dev)
+void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{
- struct mlx5_flow_steering *steering;
- int err = 0;
-
- err = mlx5_init_fc_stats(dev);
- if (err)
- return err;
-
- err = mlx5_ft_pool_init(dev);
- if (err)
- return err;
-
- steering = kzalloc(sizeof(*steering), GFP_KERNEL);
- if (!steering) {
- err = -ENOMEM;
- goto err;
- }
-
- steering->dev = dev;
- dev->priv.steering = steering;
+ struct mlx5_flow_steering *steering = dev->priv.steering;
- if (mlx5_fs_dr_is_supported(dev))
- steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
- else
- steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+ cleanup_root_ns(steering->root_ns);
+ cleanup_root_ns(steering->fdb_root_ns);
+ steering->fdb_root_ns = NULL;
+ kfree(steering->fdb_sub_ns);
+ steering->fdb_sub_ns = NULL;
+ cleanup_root_ns(steering->port_sel_root_ns);
+ cleanup_root_ns(steering->sniffer_rx_root_ns);
+ cleanup_root_ns(steering->sniffer_tx_root_ns);
+ cleanup_root_ns(steering->rdma_rx_root_ns);
+ cleanup_root_ns(steering->rdma_tx_root_ns);
+ cleanup_root_ns(steering->egress_root_ns);
+}
- steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
- sizeof(struct mlx5_flow_group), 0,
- 0, NULL);
- steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
- 0, NULL);
- if (!steering->ftes_cache || !steering->fgs_cache) {
- err = -ENOMEM;
- goto err;
- }
+int mlx5_fs_core_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int err = 0;
if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
(MLX5_CAP_GEN(dev, nic_flow_table))) ||
@@ -3180,8 +3143,64 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
}
return 0;
+
+err:
+ mlx5_fs_core_cleanup(dev);
+ return err;
+}
+
+void mlx5_fs_core_free(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+ kmem_cache_destroy(steering->ftes_cache);
+ kmem_cache_destroy(steering->fgs_cache);
+ kfree(steering);
+ mlx5_ft_pool_destroy(dev);
+ mlx5_cleanup_fc_stats(dev);
+}
+
+int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering;
+ int err = 0;
+
+ err = mlx5_init_fc_stats(dev);
+ if (err)
+ return err;
+
+ err = mlx5_ft_pool_init(dev);
+ if (err)
+ goto err;
+
+ steering = kzalloc(sizeof(*steering), GFP_KERNEL);
+ if (!steering) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ steering->dev = dev;
+ dev->priv.steering = steering;
+
+ if (mlx5_fs_dr_is_supported(dev))
+ steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else
+ steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+
+ steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+ sizeof(struct mlx5_flow_group), 0,
+ 0, NULL);
+ steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+ 0, NULL);
+ if (!steering->ftes_cache || !steering->fgs_cache) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+
err:
- mlx5_cleanup_fs(dev);
+ mlx5_fs_core_free(dev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index c488a7c5b07e..3f20523e514f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -298,8 +298,10 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
enum mlx5_flow_steering_mode mode);
-int mlx5_init_fs(struct mlx5_core_dev *dev);
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
+int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
+void mlx5_fs_core_free(struct mlx5_core_dev *dev);
+int mlx5_fs_core_init(struct mlx5_core_dev *dev);
+void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 4aa22dce9b77..81eb67fb95b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -8,7 +8,8 @@
enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
- MLX5_FW_RESET_FLAGS_PENDING_COMP
+ MLX5_FW_RESET_FLAGS_PENDING_COMP,
+ MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
};
struct mlx5_fw_reset {
@@ -155,6 +156,28 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
}
}
+static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ del_timer_sync(&fw_reset->timer);
+}
+
+static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ if (!test_and_clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
+ mlx5_core_warn(dev, "Reset request was already cleared\n");
+ return -EALREADY;
+ }
+
+ mlx5_stop_sync_reset_poll(dev);
+ if (poll_health)
+ mlx5_start_health_poll(dev);
+ return 0;
+}
+
static void mlx5_sync_reset_reload_work(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
@@ -162,6 +185,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
struct mlx5_core_dev *dev = fw_reset->dev;
int err;
+ mlx5_sync_reset_clear_reset_requested(dev, false);
mlx5_enter_error_state(dev, true);
mlx5_unload_one(dev);
err = mlx5_health_wait_pci_up(dev);
@@ -171,23 +195,6 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
mlx5_fw_reset_complete_reload(dev);
}
-static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
-{
- struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
-
- del_timer_sync(&fw_reset->timer);
-}
-
-static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
-{
- struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
-
- mlx5_stop_sync_reset_poll(dev);
- clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
- if (poll_health)
- mlx5_start_health_poll(dev);
-}
-
#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
static void poll_sync_reset(struct timer_list *t)
{
@@ -202,8 +209,10 @@ static void poll_sync_reset(struct timer_list *t)
if (fatal_error) {
mlx5_core_warn(dev, "Got Device Reset\n");
- mlx5_sync_reset_clear_reset_requested(dev, false);
- queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
+ if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
+ queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
+ else
+			mlx5_core_err(dev, "Device is being removed, dropping new reset work\n");
return;
}
@@ -229,13 +238,17 @@ static int mlx5_fw_reset_set_reset_sync_nack(struct mlx5_core_dev *dev)
return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 2, false);
}
-static void mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
+static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (test_and_set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
+ mlx5_core_warn(dev, "Reset request was already set\n");
+ return -EALREADY;
+ }
mlx5_stop_health_poll(dev, true);
- set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
mlx5_start_sync_reset_poll(dev);
+ return 0;
}
static void mlx5_fw_live_patch_event(struct work_struct *work)
@@ -264,7 +277,9 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
err ? "Failed" : "Sent");
return;
}
- mlx5_sync_reset_set_reset_requested(dev);
+ if (mlx5_sync_reset_set_reset_requested(dev))
+ return;
+
err = mlx5_fw_reset_set_reset_sync_ack(dev);
if (err)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err);
@@ -362,7 +377,8 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
struct mlx5_core_dev *dev = fw_reset->dev;
int err;
- mlx5_sync_reset_clear_reset_requested(dev, false);
+ if (mlx5_sync_reset_clear_reset_requested(dev, false))
+ return;
mlx5_core_warn(dev, "Sync Reset now. Device is going to reset.\n");
@@ -391,10 +407,8 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
reset_abort_work);
struct mlx5_core_dev *dev = fw_reset->dev;
- if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
+ if (mlx5_sync_reset_clear_reset_requested(dev, true))
return;
-
- mlx5_sync_reset_clear_reset_requested(dev, true);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
}
@@ -423,9 +437,12 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
struct mlx5_eqe *eqe = data;
+ if (test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
+ return NOTIFY_DONE;
+
switch (eqe->sub_type) {
case MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT:
- queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
+ queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
break;
case MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT:
mlx5_sync_reset_events_handle(fw_reset, eqe);
@@ -469,6 +486,18 @@ void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
}
+void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
+ cancel_work_sync(&fw_reset->fw_live_patch_work);
+ cancel_work_sync(&fw_reset->reset_request_work);
+ cancel_work_sync(&fw_reset->reset_reload_work);
+ cancel_work_sync(&fw_reset->reset_now_work);
+ cancel_work_sync(&fw_reset->reset_abort_work);
+}
+
int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
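mlx5_drain_fw_reset() pairs a sticky DROP_NEW_REQUESTS flag, honoured by both the event notifier and the poll timer, with cancel_work_sync() on every work item, so no handler can requeue itself once the drain starts. Below is a userspace analogue of that shutdown ordering, sketched with C11 atomics and pthreads; all names are illustrative, and pthread_join() merely stands in for cancel_work_sync().

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool draining;     /* set once; never cleared */
static atomic_int  queued_work;  /* pending "reset" requests */

/* Event handler: refuse new work once draining has begun. */
static void handle_event(void)
{
	if (atomic_load(&draining))
		return;              /* the NOTIFY_DONE equivalent */
	atomic_fetch_add(&queued_work, 1);
}

static void *worker(void *arg)
{
	(void)arg;
	while (!atomic_load(&draining) || atomic_load(&queued_work) > 0) {
		if (atomic_load(&queued_work) > 0) {
			atomic_fetch_sub(&queued_work, 1);
			/* ... perform one reset-reload work item ... */
		} else {
			usleep(1000);
		}
	}
	return NULL;
}

/* Drain: block new requests first, then wait out pending ones. */
static void drain(pthread_t w)
{
	atomic_store(&draining, true);
	pthread_join(w, NULL);      /* stands in for cancel_work_sync() */
}

int main(void)
{
	pthread_t w;

	pthread_create(&w, NULL, worker, NULL);
	handle_event();
	handle_event();
	drain(w);
	handle_event();              /* dropped: drain already started */
	printf("pending after drain: %d\n", atomic_load(&queued_work));
	return 0;
}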
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index 694fc7cb2684..dc141c7e641a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -16,6 +16,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev);
+void mlx5_drain_fw_reset(struct mlx5_core_dev *dev);
int mlx5_fw_reset_init(struct mlx5_core_dev *dev);
void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index 4a6ec15ef046..d6c3e6dfd71f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -100,6 +100,14 @@ static void mlx5_lag_fib_event_flush(struct notifier_block *nb)
flush_workqueue(mp->wq);
}
+static void mlx5_lag_fib_set(struct lag_mp *mp, struct fib_info *fi, u32 dst, int dst_len)
+{
+ mp->fib.mfi = fi;
+ mp->fib.priority = fi->fib_priority;
+ mp->fib.dst = dst;
+ mp->fib.dst_len = dst_len;
+}
+
struct mlx5_fib_event_work {
struct work_struct work;
struct mlx5_lag *ldev;
@@ -110,10 +118,10 @@ struct mlx5_fib_event_work {
};
};
-static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
- unsigned long event,
- struct fib_info *fi)
+static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
+ struct fib_entry_notifier_info *fen_info)
{
+ struct fib_info *fi = fen_info->fi;
struct lag_mp *mp = &ldev->lag_mp;
struct fib_nh *fib_nh0, *fib_nh1;
unsigned int nhs;
@@ -121,13 +129,15 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
/* Handle delete event */
if (event == FIB_EVENT_ENTRY_DEL) {
/* stop track */
- if (mp->mfi == fi)
- mp->mfi = NULL;
+ if (mp->fib.mfi == fi)
+ mp->fib.mfi = NULL;
return;
}
/* Handle multipath entry with lower priority value */
- if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority)
+ if (mp->fib.mfi && mp->fib.mfi != fi &&
+ (mp->fib.dst != fen_info->dst || mp->fib.dst_len != fen_info->dst_len) &&
+ fi->fib_priority >= mp->fib.priority)
return;
/* Handle add/replace event */
@@ -143,9 +153,9 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
i++;
mlx5_lag_set_port_affinity(ldev, i);
+ mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
}
- mp->mfi = fi;
return;
}
@@ -165,7 +175,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
}
/* First time we see multipath route */
- if (!mp->mfi && !__mlx5_lag_is_active(ldev)) {
+ if (!mp->fib.mfi && !__mlx5_lag_is_active(ldev)) {
struct lag_tracker tracker;
tracker = ldev->tracker;
@@ -173,7 +183,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
}
mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
- mp->mfi = fi;
+ mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
}
static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
@@ -184,7 +194,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
struct lag_mp *mp = &ldev->lag_mp;
/* Check the nh event is related to the route */
- if (!mp->mfi || mp->mfi != fi)
+ if (!mp->fib.mfi || mp->fib.mfi != fi)
return;
/* nh added/removed */
@@ -214,7 +224,7 @@ static void mlx5_lag_fib_update(struct work_struct *work)
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
mlx5_lag_fib_route_event(ldev, fib_work->event,
- fib_work->fen_info.fi);
+ &fib_work->fen_info);
fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
@@ -313,7 +323,7 @@ void mlx5_lag_mp_reset(struct mlx5_lag *ldev)
/* Clear mfi, as it might become stale when a route delete event
* has been missed, see mlx5_lag_fib_route_event().
*/
- ldev->lag_mp.mfi = NULL;
+ ldev->lag_mp.fib.mfi = NULL;
}
int mlx5_lag_mp_init(struct mlx5_lag *ldev)
@@ -324,7 +334,7 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
/* always clear mfi, as it might become stale when a route delete event
* has been missed
*/
- mp->mfi = NULL;
+ mp->fib.mfi = NULL;
if (mp->fib_nb.notifier_call)
return 0;
@@ -354,5 +364,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
unregister_fib_notifier(&init_net, &mp->fib_nb);
destroy_workqueue(mp->wq);
mp->fib_nb.notifier_call = NULL;
- mp->mfi = NULL;
+ mp->fib.mfi = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
index 57af962cad29..056a066da604 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
@@ -15,7 +15,12 @@ enum mlx5_lag_port_affinity {
struct lag_mp {
struct notifier_block fib_nb;
- struct fib_info *mfi; /* used in tracking fib events */
+ struct {
+ const void *mfi; /* used in tracking fib events */
+ u32 priority;
+ u32 dst;
+ int dst_len;
+ } fib;
struct workqueue_struct *wq;
};
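The lag_mp rework above stops keying the tracked multipath route on the fib_info pointer alone: caching the destination prefix and priority lets a replace event for the same prefix win even when its metric is not better, while genuinely different routes with worse metrics are still ignored. A small C sketch of that decision, using hypothetical stand-in types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fib_entry {
	uint32_t dst;      /* destination prefix */
	int      dst_len;  /* prefix length */
	uint32_t priority; /* route metric */
};

struct tracked_fib {
	const void *mfi;   /* opaque handle of the tracked route */
	uint32_t priority;
	uint32_t dst;
	int dst_len;
};

/* A new event supersedes the tracked route when nothing is tracked yet,
 * when it refers to the same route, when it targets the same prefix
 * (a replace), or when it carries a strictly better priority.
 */
static bool should_update(const struct tracked_fib *t, const void *mfi,
			  const struct fib_entry *e)
{
	if (!t->mfi || t->mfi == mfi)
		return true;
	if (t->dst == e->dst && t->dst_len == e->dst_len)
		return true;                     /* same prefix: replace */
	return e->priority < t->priority;        /* better metric wins */
}

int main(void)
{
	struct tracked_fib t = { (void *)1, 100, 0x0a000000, 24 };
	struct fib_entry same_prefix = { 0x0a000000, 24, 200 };
	struct fib_entry worse_other = { 0x0b000000, 24, 200 };

	printf("same prefix: %d, worse other: %d\n",
	       should_update(&t, (void *)2, &same_prefix),
	       should_update(&t, (void *)2, &worse_other));
	return 0;
}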
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index a6592f9c3c05..5be322528279 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -505,7 +505,7 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
struct ttc_params ttc_params = {};
mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
- port_sel->inner.ttc = mlx5_create_ttc_table(dev, &ttc_params);
+ port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
if (IS_ERR(port_sel->inner.ttc))
return PTR_ERR(port_sel->inner.ttc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index b63dec24747a..b78f2ba25c19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -408,6 +408,8 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
+ if (test_bit(tt, params->ignore_dests))
+ continue;
rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
&params->dests[tt],
ttc_rules[tt].etype,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2589e39eb9c7..ef196cb764e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -938,6 +938,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_sf_table_cleanup;
}
+ err = mlx5_fs_core_alloc(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to alloc flow steering\n");
+ goto err_fs;
+ }
+
dev->dm = mlx5_dm_create(dev);
if (IS_ERR(dev->dm))
mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
@@ -948,6 +954,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
return 0;
+err_fs:
+ mlx5_sf_table_cleanup(dev);
err_sf_table_cleanup:
mlx5_sf_hw_table_cleanup(dev);
err_sf_hw_table_cleanup:
@@ -985,6 +993,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_dm_cleanup(dev);
+ mlx5_fs_core_free(dev);
mlx5_sf_table_cleanup(dev);
mlx5_sf_hw_table_cleanup(dev);
mlx5_vhca_event_cleanup(dev);
@@ -1191,7 +1200,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_tls_start;
}
- err = mlx5_init_fs(dev);
+ err = mlx5_fs_core_init(dev);
if (err) {
mlx5_core_err(dev, "Failed to init flow steering\n");
goto err_fs;
@@ -1236,7 +1245,7 @@ err_ec:
err_vhca:
mlx5_vhca_event_stop(dev);
err_set_hca:
- mlx5_cleanup_fs(dev);
+ mlx5_fs_core_cleanup(dev);
err_fs:
mlx5_accel_tls_cleanup(dev);
err_tls_start:
@@ -1265,7 +1274,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
mlx5_vhca_event_stop(dev);
- mlx5_cleanup_fs(dev);
+ mlx5_fs_core_cleanup(dev);
mlx5_accel_ipsec_cleanup(dev);
mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
@@ -1618,6 +1627,10 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev);
+	/* mlx5_drain_fw_reset() uses devlink APIs, so fw_reset must be drained
+	 * before the devlink instance is unregistered.
+	 */
+ mlx5_drain_fw_reset(dev);
devlink_unregister(devlink);
mlx5_sriov_disable(pdev);
mlx5_crdump_disable(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 850937cd8bf9..1383550f44c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -530,6 +530,37 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
return 0;
}
+static void dr_action_modify_ttl_adjust(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste_actions_attr *attr,
+ bool rx_rule,
+ bool *recalc_cs_required)
+{
+ *recalc_cs_required = false;
+
+ /* if device supports csum recalculation - no adjustment needed */
+ if (mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps))
+ return;
+
+ /* no adjustment needed on TX rules */
+ if (!rx_rule)
+ return;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify)) {
+ /* Ignore the modify TTL action.
+		/* Ignore the modify TTL action.
+		 * It is always kept as the last HW action.
+		 */
+ return;
+ }
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
+ /* Due to a HW bug on some devices, modifying TTL on RX flows
+ * will cause an incorrect checksum calculation. In such cases
+ * we will use a FW table to recalculate the checksum.
+ */
+ *recalc_cs_required = true;
+}
+
static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
struct mlx5dr_action *actions[],
int last_idx)
@@ -650,8 +681,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
case DR_ACTION_TYP_MODIFY_HDR:
attr.modify_index = action->rewrite->index;
attr.modify_actions = action->rewrite->num_of_actions;
- recalc_cs_required = action->rewrite->modify_ttl &&
- !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps);
+ if (action->rewrite->modify_ttl)
+ dr_action_modify_ttl_adjust(dmn, &attr, rx_rule,
+ &recalc_cs_required);
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
@@ -732,12 +764,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
*new_hw_ste_arr_sz = nic_matcher->num_of_builders;
last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
- /* Due to a HW bug in some devices, modifying TTL on RX flows will
- * cause an incorrect checksum calculation. In this case we will
- * use a FW table to recalculate.
- */
- if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
- rx_rule && recalc_cs_required && dest_action) {
+ if (recalc_cs_required && dest_action) {
ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
if (ret) {
mlx5dr_err(dmn,
@@ -842,7 +869,8 @@ struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests,
- bool ignore_flow_level)
+ bool ignore_flow_level,
+ u32 flow_source)
{
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
@@ -914,7 +942,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
reformat_req,
&action->dest_tbl->fw_tbl.id,
&action->dest_tbl->fw_tbl.group_id,
- ignore_flow_level);
+ ignore_flow_level,
+ flow_source);
if (ret)
goto free_action;
@@ -1556,12 +1585,6 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action)
return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
}
-static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn)
-{
- return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
- !MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
-}
-
static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 max_hw_actions,
u32 num_sw_actions,
@@ -1573,6 +1596,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
struct mlx5dr_domain *dmn = action->rewrite->dmn;
+ __be64 *modify_ttl_sw_action = NULL;
int ret, i, hw_idx = 0;
__be64 *sw_action;
__be64 hw_action;
@@ -1585,8 +1609,14 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
action->rewrite->allow_rx = 1;
action->rewrite->allow_tx = 1;
- for (i = 0; i < num_sw_actions; i++) {
- sw_action = &sw_actions[i];
+ for (i = 0; i < num_sw_actions || modify_ttl_sw_action; i++) {
+		/* modify TTL is handled separately, as the last action */
+ if (i == num_sw_actions) {
+ sw_action = modify_ttl_sw_action;
+ modify_ttl_sw_action = NULL;
+ } else {
+ sw_action = &sw_actions[i];
+ }
ret = dr_action_modify_check_field_limitation(action,
sw_action);
@@ -1595,10 +1625,9 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
if (!(*modify_ttl) &&
dr_action_modify_check_is_ttl_modify(sw_action)) {
- if (dr_action_modify_ttl_ignore(dmn))
- continue;
-
+ modify_ttl_sw_action = sw_action;
*modify_ttl = true;
+ continue;
}
/* Convert SW action to HW action */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 68a4c32d5f34..f05ef0cd54ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -104,7 +104,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
bool reformat_req,
u32 *tbl_id,
u32 *group_id,
- bool ignore_flow_level)
+ bool ignore_flow_level,
+ u32 flow_source)
{
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_cmd_fte_info fte_info = {};
@@ -139,6 +140,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
fte_info.val = val;
fte_info.dest_arr = dest;
fte_info.ignore_flow_level = ignore_flow_level;
+ fte_info.flow_context.flow_source = flow_source;
ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index 5a322335f204..2010d4ac6519 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -420,7 +420,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
* encapsulation. The reason for that is that we support
* modify headers for outer headers only
*/
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_rewrite_actions(last_ste,
attr->modify_actions,
@@ -513,7 +513,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
}
}
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 46866a5fc5ca..98320e3945ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1461,7 +1461,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
bool reformat_req,
u32 *tbl_id,
u32 *group_id,
- bool ignore_flow_level);
+ bool ignore_flow_level,
+ u32 flow_source);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
u32 group_id);
#endif /* _DR_TYPES_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 045b0cf90063..728f81882589 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -520,6 +520,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
} else if (num_term_actions > 1) {
bool ignore_flow_level =
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ u32 flow_source = fte->flow_context.flow_source;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -529,7 +530,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
term_actions,
num_term_actions,
- ignore_flow_level);
+ ignore_flow_level,
+ flow_source);
if (!tmp_action) {
err = -EOPNOTSUPP;
goto free_actions;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index ec5cbec0d455..7626c85643b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -99,7 +99,8 @@ struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests,
- bool ignore_flow_level);
+ bool ignore_flow_level,
+ u32 flow_source);
struct mlx5dr_action *mlx5dr_action_create_drop(void);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 939b692ffc33..ce843ea91464 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
return 0;
errout:
+ mutex_destroy(&mlxsw_i2c->cmd.lock);
i2c_set_clientdata(client, NULL);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 01cf5a6a26bd..a2ee695a3f17 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -568,10 +568,8 @@ static int
mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
- struct __ip6_tnl_parm parms6;
-
- parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
- return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, &parms6.raddr,
+ return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp,
+ &ipip_entry->parms.daddr.addr6,
&ipip_entry->dip_kvdl_index);
}
@@ -579,10 +577,7 @@ static void
mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_entry *ipip_entry)
{
- struct __ip6_tnl_parm parms6;
-
- parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
- mlxsw_sp_ipv6_addr_put(mlxsw_sp, &parms6.raddr);
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipip_entry->parms.daddr.addr6);
}
static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index b73466470f75..fe663b0ab708 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -423,7 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
- 0, 0, parms.link, tun->fwmark, 0);
+ 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index 93df3049cdc0..830363bafcce 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -28,6 +28,7 @@ config KS8842
config KS8851
tristate "Micrel KS8851 SPI"
depends on SPI
+ depends on PTP_1588_CLOCK_OPTIONAL
select MII
select CRC32
select EEPROM_93CX6
@@ -39,6 +40,7 @@ config KS8851
config KS8851_MLL
tristate "Micrel KS8851 MLL"
depends on HAS_IOMEM
+ depends on PTP_1588_CLOCK_OPTIONAL
select MII
select CRC32
select EEPROM_93CX6
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
index ce5970bdcc6a..005e56ea5da1 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -346,7 +346,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
lan966x_mac_process_raw_entry(&raw_entries[column],
mac, &vid, &dest_idx);
- WARN_ON(dest_idx > lan966x->num_phys_ports);
+ if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
+ continue;
/* If the entry in SW is found, then there is nothing
* to do
@@ -392,7 +393,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
lan966x_mac_process_raw_entry(&raw_entries[column],
mac, &vid, &dest_idx);
- WARN_ON(dest_idx > lan966x->num_phys_ports);
+ if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
+ continue;
mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
if (!mac_entry)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 1f8c67f0261b..05f6dcc9dfd5 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -103,6 +103,24 @@ static int lan966x_create_targets(struct platform_device *pdev,
return 0;
}
+static bool lan966x_port_unique_address(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ port = lan966x->ports[p];
+ if (!port || port->dev == dev)
+ continue;
+
+ if (ether_addr_equal(dev->dev_addr, port->dev->dev_addr))
+ return false;
+ }
+
+ return true;
+}
+
static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
{
struct lan966x_port *port = netdev_priv(dev);
@@ -110,16 +128,26 @@ static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
const struct sockaddr *addr = p;
int ret;
+ if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+ return 0;
+
/* Learn the new net device MAC address in the mac table. */
ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
if (ret)
return ret;
+	/* If another port still uses the same address as the dev, don't
+	 * delete it from the MAC table.
+	 */
+ if (!lan966x_port_unique_address(dev))
+ goto out;
+
/* Then forget the previous one. */
ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
if (ret)
return ret;
+out:
eth_hw_addr_set(dev, addr->sa_data);
return ret;
}
@@ -446,6 +474,12 @@ static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port,
ANA_CPU_FWD_CFG_MLD_REDIR_ENA)))
return true;
+ if (eth_type_vlan(skb->protocol)) {
+ skb = skb_vlan_untag(skb);
+ if (unlikely(!skb))
+ return false;
+ }
+
if (skb->protocol == htons(ETH_P_IP) &&
ip_hdr(skb)->protocol == IPPROTO_IGMP)
return false;
@@ -665,6 +699,9 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
disable_irq(lan966x->ana_irq);
lan966x->ana_irq = -ENXIO;
}
+
+ if (lan966x->ptp_irq)
+ devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
}
static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
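The set_mac_address path above returns early when the address is unchanged and only forgets the old address from the shared MAC table when no sibling port still uses it. The same check in a self-contained C sketch, with a toy port array standing in for the switch state:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NUM_PORTS 4
#define ETH_ALEN  6

struct port {
	unsigned char mac[ETH_ALEN];
	bool in_use;
};

static struct port ports[NUM_PORTS];

/* True when no other active port shares @self's current MAC address. */
static bool mac_unique(int self)
{
	for (int p = 0; p < NUM_PORTS; p++) {
		if (p == self || !ports[p].in_use)
			continue;
		if (!memcmp(ports[p].mac, ports[self].mac, ETH_ALEN))
			return false;
	}
	return true;
}

static void set_mac(int self, const unsigned char *new_mac)
{
	if (!memcmp(ports[self].mac, new_mac, ETH_ALEN))
		return;                      /* unchanged: nothing to do */

	/* ... learn new_mac in the shared table here ... */

	if (mac_unique(self))
		printf("port %d: forgetting old MAC from shared table\n", self);
	else
		printf("port %d: old MAC still used by a sibling, keeping it\n", self);

	memcpy(ports[self].mac, new_mac, ETH_ALEN);
}

int main(void)
{
	unsigned char a[ETH_ALEN] = { 2, 0, 0, 0, 0, 1 };
	unsigned char b[ETH_ALEN] = { 2, 0, 0, 0, 0, 2 };

	ports[0].in_use = ports[1].in_use = true;
	memcpy(ports[0].mac, a, ETH_ALEN);
	memcpy(ports[1].mac, a, ETH_ALEN);   /* shares port 0's address */
	set_mac(0, b);                       /* old MAC kept: port 1 uses it */
	return 0;
}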
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index ae782778d6dd..0a1041da4384 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -29,10 +29,10 @@ enum {
static u64 lan966x_ptp_get_nominal_value(void)
{
- u64 res = 0x304d2df1;
-
- res <<= 32;
- return res;
+	/* This is the default value by which the time of day advances on each
+	 * system clock cycle. It is in 5.59 fixed-point nanosecond format.
+	 */
+ return 0x304d4873ecade305;
}
int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
index e3555c94294d..df2bee678559 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -322,8 +322,7 @@ static int lan966x_port_prechangeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev) && !info->linking)
switchdev_bridge_port_unoffload(port->dev, port,
- &lan966x_switchdev_nb,
- &lan966x_switchdev_blocking_nb);
+ NULL, NULL);
return NOTIFY_DONE;
}
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index e443bd8b2d09..20ceac81a2c2 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -551,7 +551,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_vcap_filter *filter;
- int err;
+ int err = 0;
u32 val;
list_for_each_entry(filter, &block->rules, list) {
@@ -570,7 +570,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
if (vlan_aware)
err = ocelot_del_vlan_unaware_pvid(ocelot, port,
ocelot_port->bridge);
- else
+ else if (ocelot_port->bridge)
err = ocelot_add_vlan_unaware_pvid(ocelot, port,
ocelot_port->bridge);
if (err)
@@ -629,6 +629,13 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
{
int err;
+ /* Ignore VID 0 added to our RX filter by the 8021q module, since
+ * that collides with OCELOT_STANDALONE_PVID and changes it from
+ * egress-untagged to egress-tagged.
+ */
+ if (!vid)
+ return 0;
+
err = ocelot_vlan_member_add(ocelot, port, vid, untagged);
if (err)
return err;
@@ -651,6 +658,9 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
bool del_pvid = false;
int err;
+ if (!vid)
+ return 0;
+
if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
del_pvid = true;
@@ -1612,7 +1622,7 @@ int ocelot_trap_add(struct ocelot *ocelot, int port,
trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
trap->action.port_mask = 0;
trap->take_ts = take_ts;
- list_add_tail(&trap->trap_list, &ocelot->traps);
+ trap->is_trap = true;
new = true;
}
@@ -1624,10 +1634,8 @@ int ocelot_trap_add(struct ocelot *ocelot, int port,
err = ocelot_vcap_filter_replace(ocelot, trap);
if (err) {
trap->ingress_port_mask &= ~BIT(port);
- if (!trap->ingress_port_mask) {
- list_del(&trap->trap_list);
+ if (!trap->ingress_port_mask)
kfree(trap);
- }
return err;
}
@@ -1647,11 +1655,8 @@ int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
return 0;
trap->ingress_port_mask &= ~BIT(port);
- if (!trap->ingress_port_mask) {
- list_del(&trap->trap_list);
-
+ if (!trap->ingress_port_mask)
return ocelot_vcap_filter_del(ocelot, trap);
- }
return ocelot_vcap_filter_replace(ocelot, trap);
}
@@ -2859,6 +2864,8 @@ static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
val = BIT(port);
ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4);
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6);
}
static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 03b5e59d033e..51cf241ff7d0 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -280,9 +280,10 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_TRAP:
- if (filter->block_id != VCAP_IS2) {
+ if (filter->block_id != VCAP_IS2 ||
+ filter->lookup != 0) {
NL_SET_ERR_MSG_MOD(extack,
- "Trap action can only be offloaded to VCAP IS2");
+ "Trap action can only be offloaded to VCAP IS2 lookup 0");
return -EOPNOTSUPP;
}
if (filter->goto_target != -1) {
@@ -295,7 +296,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->action.cpu_copy_ena = true;
filter->action.cpu_qu_num = 0;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
- list_add_tail(&filter->trap_list, &ocelot->traps);
+ filter->is_trap = true;
break;
case FLOW_ACTION_POLICE:
if (filter->block_id == PSFP_BLOCK_ID) {
@@ -878,8 +879,6 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
ret = ocelot_flower_parse(ocelot, port, ingress, f, filter);
if (ret) {
- if (!list_empty(&filter->trap_list))
- list_del(&filter->trap_list);
kfree(filter);
return ret;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index c8701ac955a8..eeb4cc07dd16 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -374,7 +374,6 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
OCELOT_VCAP_BIT_0);
vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
~filter->ingress_port_mask);
- vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_ANY);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH,
OCELOT_VCAP_BIT_ANY);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
@@ -1217,6 +1216,8 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
struct ocelot_vcap_filter *tmp;
tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ /* Read back the filter's counters before moving it */
+ vcap_entry_get(ocelot, i - 1, tmp);
vcap_entry_set(ocelot, i, tmp);
}
@@ -1250,7 +1251,11 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter del_filter;
int i, index;
+ /* Need to inherit the block_id so that vcap_entry_set()
+ * does not get confused and knows where to install it.
+ */
memset(&del_filter, 0, sizeof(del_filter));
+ del_filter.block_id = filter->block_id;
/* Gets index of the filter */
index = ocelot_vcap_block_get_filter_index(block, filter);
@@ -1265,6 +1270,8 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter *tmp;
tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ /* Read back the filter's counters before moving it */
+ vcap_entry_get(ocelot, i + 1, tmp);
vcap_entry_set(ocelot, i, tmp);
}
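Both ocelot_vcap.c hunks fix the same class of bug: shifting TCAM entries to open or close a slot rewrites each entry, and the rewrite clobbers its hardware hit counter unless the live value is read back into the software copy first. A compact C model of the delete-side shift, with arrays standing in for the TCAM and its counters:

#include <stdio.h>

#define TCAM_SIZE 8

struct entry {
	int rule;     /* the filter itself */
	int counter;  /* software copy of the hit counter */
};

static struct entry sw[TCAM_SIZE];  /* software view */
static int hw_counter[TCAM_SIZE];   /* hardware hit counters */

/* Writing a slot resets its hardware counter to the software copy. */
static void entry_set(int ix, struct entry *e)
{
	hw_counter[ix] = e->counter;
	sw[ix] = *e;
}

/* Read back the live counter before the entry is moved. */
static void entry_get(int ix, struct entry *e)
{
	e->counter = hw_counter[ix];
}

/* Delete slot @ix by shifting everything below it up one position. */
static void tcam_delete(int ix, int used)
{
	for (int i = ix; i < used - 1; i++) {
		struct entry tmp = sw[i + 1];

		entry_get(i + 1, &tmp);   /* preserve its hit count */
		entry_set(i, &tmp);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct entry e = { .rule = i, .counter = 0 };

		entry_set(i, &e);
		hw_counter[i] = 10 * (i + 1);  /* traffic hits the rule */
	}
	tcam_delete(0, 3);
	printf("slot 0 now rule %d with %d hits\n", sw[0].rule, hw_counter[0]);
	return 0;
}

Dropping the entry_get() call reproduces the bug: the shifted rule would land in its new slot with a stale counter of zero.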
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 50ac3ee2577a..21d2645885ce 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2903,11 +2903,9 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
status = myri10ge_xmit(curr, dev);
if (status != 0) {
dev_kfree_skb_any(curr);
- if (segs != NULL) {
- curr = segs;
- segs = next;
+ skb_list_walk_safe(next, curr, next) {
curr->next = NULL;
- dev_kfree_skb_any(segs);
+ dev_kfree_skb_any(curr);
}
goto drop;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 6ffc62c41165..0a7a757494bc 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -256,7 +256,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = ionic_map_bars(ionic);
if (err)
- goto err_out_pci_disable_device;
+ goto err_out_pci_release_regions;
/* Configure the device */
err = ionic_setup(ionic);
@@ -360,6 +360,7 @@ err_out_teardown:
err_out_unmap_bars:
ionic_unmap_bars(ionic);
+err_out_pci_release_regions:
pci_release_regions(pdev);
err_out_pci_disable_device:
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index e3edca187ddf..5250d1d1e49c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -489,7 +489,7 @@ struct split_type_defs {
#define STATIC_DEBUG_LINE_DWORDS 9
-#define NUM_COMMON_GLOBAL_PARAMS 11
+#define NUM_COMMON_GLOBAL_PARAMS 10
#define MAX_RECURSION_DEPTH 10
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index b242000a77fd..b7cc36589f59 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -748,6 +748,9 @@ qede_build_skb(struct qede_rx_queue *rxq,
buf = page_address(bd->data) + bd->page_offset;
skb = build_skb(buf, rxq->rx_buf_seg_size);
+ if (unlikely(!skb))
+ return NULL;
+
skb_reserve(skb, pad);
skb_put(skb, len);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index b30589a135c2..06f4d9a9e938 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3614,7 +3614,8 @@ static void ql_reset_work(struct work_struct *work)
qdev->mem_map_registers;
unsigned long hw_flags;
- if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
+ if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
+ test_bit(QL_RESET_START, &qdev->flags)) {
clear_bit(QL_LINK_MASTER, &qdev->flags);
/*
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 50d535981a35..f8edb3f1b73a 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3579,6 +3579,11 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx)
n_parts++;
}
+ if (!n_parts) {
+ kfree(parts);
+ return 0;
+ }
+
rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
if (rc)
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index f9064532beb6..40df910aa140 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -786,9 +786,90 @@ void efx_remove_channels(struct efx_nic *efx)
kfree(efx->xdp_tx_queues);
}
+static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+ struct efx_tx_queue *tx_queue)
+{
+ if (xdp_queue_number >= efx->xdp_tx_queue_count)
+ return -EINVAL;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Channel %u TXQ %u is XDP %u, HW %u\n",
+ tx_queue->channel->channel, tx_queue->label,
+ xdp_queue_number, tx_queue->queue);
+ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ return 0;
+}
+
+static void efx_set_xdp_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ unsigned int next_queue = 0;
+ int xdp_queue_number = 0;
+ int rc;
+
+ /* We need to mark which channels really have RX and TX
+ * queues, and adjust the TX queue numbers if we have separate
+ * RX-only and TX-only channels.
+ */
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel < efx->tx_channel_offset)
+ continue;
+
+ if (efx_channel_is_xdp_tx(channel)) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->queue = next_queue++;
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+ tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
+ } else {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->queue = next_queue++;
+ netif_dbg(efx, drv, efx->net_dev,
+ "Channel %u TXQ %u is HW %u\n",
+ channel->channel, tx_queue->label,
+ tx_queue->queue);
+ }
+
+ /* If XDP is borrowing queues from net stack, it must
+ * use the queue with no csum offload, which is the
+ * first one of the channel
+ * (note: tx_queue_by_type is not initialized yet)
+ */
+ if (efx->xdp_txq_queues_mode ==
+ EFX_XDP_TX_QUEUES_BORROWED) {
+ tx_queue = &channel->tx_queue[0];
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+ tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
+ }
+ }
+ WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+ xdp_queue_number != efx->xdp_tx_queue_count);
+ WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+ xdp_queue_number > efx->xdp_tx_queue_count);
+
+	/* If we have more CPUs than assigned XDP TX queues, assign the
+	 * already existing queues to the remaining CPUs
+	 */
+ next_queue = 0;
+ while (xdp_queue_number < efx->xdp_tx_queue_count) {
+ tx_queue = efx->xdp_tx_queues[next_queue++];
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
+}
+
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
- struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel,
+ *ptp_channel = efx_ptp_channel(efx);
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
unsigned int i, next_buffer_table = 0;
u32 old_rxq_entries, old_txq_entries;
int rc, rc2;
@@ -857,7 +938,9 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
efx_init_napi_channel(efx->channel[i]);
}
+ efx_set_xdp_channels(efx);
out:
+ efx->ptp_data = NULL;
/* Destroy unused channel structures */
for (i = 0; i < efx->n_channels; i++) {
channel = other_channel[i];
@@ -868,6 +951,7 @@ out:
}
}
+ efx->ptp_data = ptp_data;
rc2 = efx_soft_enable_interrupts(efx);
if (rc2) {
rc = rc ? rc : rc2;
@@ -886,29 +970,13 @@ rollback:
efx->txq_entries = old_txq_entries;
for (i = 0; i < efx->n_channels; i++)
swap(efx->channel[i], other_channel[i]);
+ efx_ptp_update_channel(efx, ptp_channel);
goto out;
}
-static inline int
-efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
- struct efx_tx_queue *tx_queue)
-{
- if (xdp_queue_number >= efx->xdp_tx_queue_count)
- return -EINVAL;
-
- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
- tx_queue->channel->channel, tx_queue->label,
- xdp_queue_number, tx_queue->queue);
- efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
- return 0;
-}
-
int efx_set_channels(struct efx_nic *efx)
{
- struct efx_tx_queue *tx_queue;
struct efx_channel *channel;
- unsigned int next_queue = 0;
- int xdp_queue_number;
int rc;
efx->tx_channel_offset =
@@ -926,61 +994,14 @@ int efx_set_channels(struct efx_nic *efx)
return -ENOMEM;
}
- /* We need to mark which channels really have RX and TX
- * queues, and adjust the TX queue numbers if we have separate
- * RX-only and TX-only channels.
- */
- xdp_queue_number = 0;
efx_for_each_channel(channel, efx) {
if (channel->channel < efx->n_rx_channels)
channel->rx_queue.core_index = channel->channel;
else
channel->rx_queue.core_index = -1;
-
- if (channel->channel >= efx->tx_channel_offset) {
- if (efx_channel_is_xdp_tx(channel)) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->queue = next_queue++;
- rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
- if (rc == 0)
- xdp_queue_number++;
- }
- } else {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->queue = next_queue++;
- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
- channel->channel, tx_queue->label,
- tx_queue->queue);
- }
-
- /* If XDP is borrowing queues from net stack, it must use the queue
- * with no csum offload, which is the first one of the channel
- * (note: channel->tx_queue_by_type is not initialized yet)
- */
- if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
- tx_queue = &channel->tx_queue[0];
- rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
- if (rc == 0)
- xdp_queue_number++;
- }
- }
- }
}
- WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
- xdp_queue_number != efx->xdp_tx_queue_count);
- WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
- xdp_queue_number > efx->xdp_tx_queue_count);
- /* If we have more CPUs than assigned XDP TX queues, assign the already
- * existing queues to the exceeding CPUs
- */
- next_queue = 0;
- while (xdp_queue_number < efx->xdp_tx_queue_count) {
- tx_queue = efx->xdp_tx_queues[next_queue++];
- rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
- if (rc == 0)
- xdp_queue_number++;
- }
+ efx_set_xdp_channels(efx);
rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
if (rc)
@@ -1124,7 +1145,7 @@ void efx_start_channels(struct efx_nic *efx)
struct efx_rx_queue *rx_queue;
struct efx_channel *channel;
- efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rev(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_init_tx_queue(tx_queue);
atomic_inc(&efx->active_queues);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index f0ef515e2ade..4625f85acab2 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -45,6 +45,7 @@
#include "farch_regs.h"
#include "tx.h"
#include "nic.h" /* indirectly includes ptp.h */
+#include "efx_channels.h"
/* Maximum number of events expected to make up a PTP event */
#define MAX_EVENT_FRAGS 3
@@ -541,6 +542,12 @@ struct efx_channel *efx_ptp_channel(struct efx_nic *efx)
return efx->ptp_data ? efx->ptp_data->channel : NULL;
}
+void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel)
+{
+ if (efx->ptp_data)
+ efx->ptp_data->channel = channel;
+}
+
static u32 last_sync_timestamp_major(struct efx_nic *efx)
{
struct efx_channel *channel = efx_ptp_channel(efx);
@@ -1443,6 +1450,11 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
int rc = 0;
unsigned int pos;
+ if (efx->ptp_data) {
+ efx->ptp_data->channel = channel;
+ return 0;
+ }
+
ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
efx->ptp_data = ptp;
if (!efx->ptp_data)
@@ -2176,7 +2188,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
.pre_probe = efx_ptp_probe_channel,
.post_remove = efx_ptp_remove_channel,
.get_name = efx_ptp_get_channel_name,
- /* no copy operation; there is no need to reallocate this channel */
+ .copy = efx_copy_channel,
.receive_skb = efx_ptp_rx,
.want_txqs = efx_ptp_want_txqs,
.keep_eventq = false,
diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h
index 9855e8c9e544..7b1ef7002b3f 100644
--- a/drivers/net/ethernet/sfc/ptp.h
+++ b/drivers/net/ethernet/sfc/ptp.h
@@ -16,6 +16,7 @@ struct ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
+void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_remove(struct efx_nic *efx);
int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 1b22c7be0088..fa8b9aacca11 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -150,6 +150,9 @@ static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx;
int i;
+ if (unlikely(!rx_queue->page_ring))
+ return;
+
/* Unmap and release the pages in the recycle ring. Remove the ring. */
for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
struct page *page = rx_queue->page_ring[i];
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index d16e031e95f4..6983799e1c05 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -443,6 +443,9 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
if (unlikely(!tx_queue))
return -EINVAL;
+ if (!tx_queue->initialised)
+ return -EINVAL;
+
if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index d530cde2b864..9bc8281b7f5b 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -101,6 +101,8 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"shutting down TX queue %d\n", tx_queue->queue);
+ tx_queue->initialised = false;
+
if (!tx_queue->buffer)
return;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 7a50ba00f8ae..c854efdf1f25 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2431,7 +2431,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
if (irq == -EPROBE_DEFER) {
retval = -EPROBE_DEFER;
goto out_0;
- } else if (irq <= 0) {
+ } else if (irq < 0) {
pr_warn("Could not allocate irq resource\n");
retval = -ENODEV;
goto out_0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index cd478d2cd871..00f6d347eaf7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -57,10 +57,6 @@
#define TSE_PCS_USE_SGMII_ENA BIT(0)
#define TSE_PCS_IF_USE_SGMII 0x03
-#define SGMII_ADAPTER_CTRL_REG 0x00
-#define SGMII_ADAPTER_DISABLE 0x0001
-#define SGMII_ADAPTER_ENABLE 0x0000
-
#define AUTONEGO_LINK_TIMER 20
static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
@@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
unsigned int speed)
{
void __iomem *tse_pcs_base = pcs->tse_pcs_base;
- void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
u32 val;
- writew(SGMII_ADAPTER_ENABLE,
- sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
-
pcs->autoneg = phy_dev->autoneg;
if (phy_dev->autoneg == AUTONEG_ENABLE) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
index 442812c0a4bd..694ac25ef426 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
@@ -10,6 +10,10 @@
#include <linux/phy.h>
#include <linux/timer.h>
+#define SGMII_ADAPTER_CTRL_REG 0x00
+#define SGMII_ADAPTER_ENABLE 0x0000
+#define SGMII_ADAPTER_DISABLE 0x0001
+
struct tse_pcs {
struct device *dev;
void __iomem *tse_pcs_base;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 63754a9c4ba7..0b0be0898ac5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -454,6 +454,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->has_gmac4 = 1;
plat->force_sf_dma_mode = 0;
plat->tso_en = 1;
+ plat->sph_disable = 1;
/* Multiplying factor to the clk_eee_i clock time
* period to make it closer to 100 ns. This value
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index ecf759ee1c9f..017dbbda0c1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -205,7 +205,7 @@ static const struct pci_device_id loongson_dwmac_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
-struct pci_driver loongson_dwmac_driver = {
+static struct pci_driver loongson_dwmac_driver = {
.name = "dwmac-loongson-pci",
.id_table = loongson_dwmac_id_table,
.probe = loongson_dwmac_probe,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index b7c2579c963b..6b447d8f0bd8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -18,9 +18,6 @@
#include "altr_tse_pcs.h"
-#define SGMII_ADAPTER_CTRL_REG 0x00
-#define SGMII_ADAPTER_DISABLE 0x0001
-
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
@@ -62,14 +59,13 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
{
struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
void __iomem *splitter_base = dwmac->splitter_base;
- void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
struct device *dev = dwmac->dev;
struct net_device *ndev = dev_get_drvdata(dev);
struct phy_device *phy_dev = ndev->phydev;
u32 val;
- if ((tse_pcs_base) && (sgmii_adapter_base))
+ if (sgmii_adapter_base)
writew(SGMII_ADAPTER_DISABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
@@ -93,8 +89,11 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
}
- if (tse_pcs_base && sgmii_adapter_base)
+ if (phy_dev && sgmii_adapter_base) {
+ writew(SGMII_ADAPTER_ENABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
+ }
}
static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index f86cc83003f2..f834472599f7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -907,6 +907,7 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
ret = mdio_mux_init(priv->device, mdio_mux, mdio_mux_syscon_switch_fn,
&gmac->mux_handle, priv, priv->mii);
+ of_node_put(mdio_mux);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 22fea0f67245..92d32940aff0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -71,9 +71,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
writel(value, ioaddr + PTP_TCR);
/* wait for present system time initialize to complete */
- return readl_poll_timeout(ioaddr + PTP_TCR, value,
+ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
!(value & PTP_TCR_TSINIT),
- 10000, 100000);
+ 10, 100000);
}
static int config_addend(void __iomem *ioaddr, u32 addend)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4a4b3651ab3e..2525a80353b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -7021,7 +7021,7 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "TSO feature enabled\n");
}
- if (priv->dma_cap.sphen) {
+ if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
ndev->hw_features |= NETIF_F_GRO;
priv->sph_cap = true;
priv->sph = priv->sph_cap;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index fcf17d8a0494..644bb54f5f02 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -181,7 +181,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
/* Enable pci device */
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
__func__);
@@ -241,8 +241,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
pcim_iounmap_regions(pdev, BIT(i));
break;
}
-
- pci_disable_device(pdev);
}
static int __maybe_unused stmmac_pci_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 5d29f336315b..11e1055e8260 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -431,8 +431,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->phylink_node = np;
/* Get max speed of operation from device tree */
- if (of_property_read_u32(np, "max-speed", &plat->max_speed))
- plat->max_speed = -1;
+ of_property_read_u32(np, "max-speed", &plat->max_speed);
plat->bus_id = of_alias_get_id(np, "ethernet");
if (plat->bus_id < 0)
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index bd4b1528cf99..79e850fe4621 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1246,8 +1246,10 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
sizeof(struct cpsw_slave_data),
GFP_KERNEL);
- if (!data->slave_data)
+ if (!data->slave_data) {
+ of_node_put(tmp_node);
return -ENOMEM;
+ }
/* Populate all the child nodes here...
*/
@@ -1341,6 +1343,7 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
err_node_put:
of_node_put(port_np);
+ of_node_put(tmp_node);
return ret;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 0f9c88dd1a4a..d5c1e5c4a508 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -433,8 +433,6 @@ struct axienet_local {
struct net_device *ndev;
struct device *dev;
- struct device_node *phy_node;
-
struct phylink *phylink;
struct phylink_config phylink_config;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index c7eb05e4a6bf..d6fc3f7acdf0 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2064,25 +2064,33 @@ static int axienet_probe(struct platform_device *pdev)
if (ret)
goto cleanup_clk;
- lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- if (lp->phy_node) {
- ret = axienet_mdio_setup(lp);
- if (ret)
- dev_warn(&pdev->dev,
- "error registering MDIO bus: %d\n", ret);
- }
+ ret = axienet_mdio_setup(lp);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "error registering MDIO bus: %d\n", ret);
+
if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
- if (!lp->phy_node) {
- dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
+ np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
+ if (!np) {
+ /* Deprecated: Always use "pcs-handle" for pcs_phy.
+ * Falling back to "phy-handle" here is only for
+ * backward compatibility with old device trees.
+ */
+ np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ }
+ if (!np) {
+ dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
ret = -EINVAL;
goto cleanup_mdio;
}
- lp->pcs_phy = of_mdio_find_device(lp->phy_node);
+ lp->pcs_phy = of_mdio_find_device(np);
if (!lp->pcs_phy) {
ret = -EPROBE_DEFER;
+ of_node_put(np);
goto cleanup_mdio;
}
+ of_node_put(np);
lp->pcs.ops = &axienet_pcs_ops;
lp->pcs.poll = true;
}
@@ -2125,8 +2133,6 @@ cleanup_mdio:
put_device(&lp->pcs_phy->dev);
if (lp->mii_bus)
axienet_mdio_teardown(lp);
- of_node_put(lp->phy_node);
-
cleanup_clk:
clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
clk_disable_unprepare(lp->axi_clk);
@@ -2155,9 +2161,6 @@ static int axienet_remove(struct platform_device *pdev)
clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
clk_disable_unprepare(lp->axi_clk);
- of_node_put(lp->phy_node);
- lp->phy_node = NULL;
-
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 57a24f62e353..d770b3ac3f74 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -823,10 +823,10 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
{
struct mii_bus *bus;
- int rc;
struct resource res;
struct device_node *np = of_get_parent(lp->phy_node);
struct device_node *npp;
+ int rc, ret;
/* Don't register the MDIO bus if the phy_node or its parent node
* can't be found.
@@ -836,8 +836,14 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
return -ENODEV;
}
npp = of_get_parent(np);
-
- of_address_to_resource(npp, 0, &res);
+ ret = of_address_to_resource(npp, 0, &res);
+ of_node_put(npp);
+ if (ret) {
+ dev_err(dev, "%s resource error!\n",
+ dev->of_node->full_name);
+ of_node_put(np);
+ return ret;
+ }
if (lp->ndev->mem_start != res.start) {
struct phy_device *phydev;
phydev = of_phy_find_device(lp->phy_node);
@@ -846,6 +852,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
"MDIO of the phy is not registered yet\n");
else
put_device(&phydev->mdio.dev);
+ of_node_put(np);
return 0;
}
@@ -858,6 +865,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
bus = mdiobus_alloc();
if (!bus) {
dev_err(dev, "Failed to allocate mdiobus\n");
+ of_node_put(np);
return -ENOMEM;
}
@@ -870,6 +878,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
bus->parent = dev;
rc = of_mdiobus_register(bus, np);
+ of_node_put(np);
if (rc) {
dev_err(dev, "Failed to register mdio bus.\n");
goto err_register;
@@ -926,8 +935,6 @@ static int xemaclite_open(struct net_device *dev)
xemaclite_disable_interrupts(lp);
if (lp->phy_node) {
- u32 bmcr;
-
lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
xemaclite_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
@@ -938,19 +945,6 @@ static int xemaclite_open(struct net_device *dev)
/* EmacLite doesn't support giga-bit speeds */
phy_set_max_speed(lp->phy_dev, SPEED_100);
-
- /* Don't advertise 1000BASE-T Full/Half duplex speeds */
- phy_write(lp->phy_dev, MII_CTRL1000, 0);
-
- /* Advertise only 10 and 100mbps full/half duplex speeds */
- phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL |
- ADVERTISE_CSMA);
-
- /* Restart auto negotiation */
- bmcr = phy_read(lp->phy_dev, MII_BMCR);
- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
- phy_write(lp->phy_dev, MII_BMCR, bmcr);
-
phy_start(lp->phy_dev);
}
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 16105292b140..74e845fa2e07 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1355,7 +1355,9 @@ static int rr_close(struct net_device *dev)
rrpriv->fw_running = 0;
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
del_timer_sync(&rrpriv->timer);
+ spin_lock_irqsave(&rrpriv->lock, flags);
writel(0, &regs->TxPi);
writel(0, &regs->IpRxPi);
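The unlock/lock dance around del_timer_sync() above follows a standard rule: del_timer_sync() must never be called while holding a lock that the timer callback itself acquires, or the two spin on each other forever. A minimal sketch of the pattern, with hypothetical names:

struct demo_priv {
	spinlock_t lock;
	struct timer_list timer;	/* callback takes demo_priv.lock */
};

static void demo_stop(struct demo_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	/* ... teardown that needs the lock ... */
	spin_unlock_irqrestore(&priv->lock, flags);
	del_timer_sync(&priv->timer);	/* safe: lock not held, callback can finish */
	spin_lock_irqsave(&priv->lock, flags);
	/* ... remaining teardown ... */
	spin_unlock_irqrestore(&priv->lock, flags);
}
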
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index bc981043cc80..a701178a1d13 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1367,9 +1367,10 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
struct gsi_event *event_done;
struct gsi_event *event;
struct gsi_trans *trans;
+ u32 trans_count = 0;
u32 byte_count = 0;
- u32 old_index;
u32 event_avail;
+ u32 old_index;
trans_info = &channel->trans_info;
@@ -1390,6 +1391,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
do {
trans->len = __le16_to_cpu(event->len);
byte_count += trans->len;
+ trans_count++;
/* Move on to the next event and transaction */
if (--event_avail)
@@ -1401,7 +1403,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
/* We record RX bytes when they are received */
channel->byte_count += byte_count;
- channel->trans_count++;
+ channel->trans_count += trans_count;
}
/* Initialize a ring, including allocating DMA memory for its entries */
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 888e94278a84..cea7b2e2ce96 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1150,13 +1150,12 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
return;
skb = __dev_alloc_skb(len, GFP_ATOMIC);
- if (!skb)
- return;
-
- /* Copy the data into the socket buffer and receive it */
- skb_put(skb, len);
- memcpy(skb->data, data, len);
- skb->truesize += extra;
+ if (skb) {
+ /* Copy the data into the socket buffer and receive it */
+ skb_put(skb, len);
+ memcpy(skb->data, data, len);
+ skb->truesize += extra;
+ }
ipa_modem_skb_rx(endpoint->netdev, skb);
}
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 90f3aec55b36..ec010cf2e816 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -125,7 +125,7 @@ static void ipa_qmi_indication(struct ipa_qmi *ipa_qmi)
*/
static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
{
- struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ struct ipa *ipa;
int ret;
/* We aren't ready until the modem and microcontroller are */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 069e8824c264..b00bc8173abe 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -460,8 +460,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_CONSUMED;
*pskb = skb;
eth = eth_hdr(skb);
- if (macvlan_forward_source(skb, port, eth->h_source))
+ if (macvlan_forward_source(skb, port, eth->h_source)) {
+ kfree_skb(skb);
return RX_HANDLER_CONSUMED;
+ }
src = macvlan_hash_lookup(port, eth->h_source);
if (src && src->mode != MACVLAN_MODE_VEPA &&
src->mode != MACVLAN_MODE_BRIDGE) {
@@ -480,8 +482,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_PASS;
}
- if (macvlan_forward_source(skb, port, eth->h_source))
+ if (macvlan_forward_source(skb, port, eth->h_source)) {
+ kfree_skb(skb);
return RX_HANDLER_CONSUMED;
+ }
if (macvlan_passthru(port))
vlan = list_first_or_null_rcu(&port->vlans,
struct macvlan_dev, list);
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index baf7afac7857..53846c6b56ca 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -553,7 +553,7 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
hdr->source_slave = ((llsrc << 1) & 0xff) | 0x01;
mhdr->ver = 0x01;
- return 0;
+ return sizeof(struct mctp_i2c_hdr);
}
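The change reflects the header_ops->create() contract: on success the callback returns the number of header bytes it pushed, which callers such as dev_hard_header() propagate, not 0. A minimal sketch with a hypothetical header type:

struct demo_hdr {
	u8 dest;
	u8 source;
};

static int demo_header_create(struct sk_buff *skb, struct net_device *dev,
			      unsigned short type, const void *daddr,
			      const void *saddr, unsigned int len)
{
	struct demo_hdr *hdr = skb_push(skb, sizeof(*hdr));

	/* ... fill the on-wire header from daddr/saddr ... */
	return sizeof(*hdr);	/* report bytes pushed, never 0 on success */
}
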
static int mctp_i2c_tx_thread(void *data)
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index 1becb1a731f6..1c1584fca632 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -43,6 +43,11 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
int rc;
rc = fwnode_irq_get(child, 0);
+ /* Don't wait forever if the IRQ provider doesn't become available,
+ * just fall back to poll mode
+ */
+ if (rc == -EPROBE_DEFER)
+ rc = driver_deferred_probe_check_state(&phy->mdio.dev);
if (rc == -EPROBE_DEFER)
return rc;
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index c483ba67c21f..582969751b4c 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -102,6 +102,9 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum)
u32 val;
int ret;
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
ret = mscc_miim_wait_pending(bus);
if (ret)
goto out;
@@ -145,6 +148,9 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id,
struct mscc_miim_dev *miim = bus->priv;
int ret;
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
ret = mscc_miim_wait_pending(bus);
if (ret < 0)
goto out;
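Both hunks apply the same guard: the MDIO core flags Clause 45 accesses by setting MII_ADDR_C45 in regnum, and a Clause-22-only bus must return -EOPNOTSUPP rather than mangle the access into a C22 cycle. A minimal sketch, where demo_c22_read() is a hypothetical stand-in for the bus's real read path:

static int demo_c22_read(void *priv, int phy_id, int regnum); /* bus-specific */

static int demo_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;	/* Clause 45 unsupported on this bus */

	return demo_c22_read(bus->priv, phy_id, regnum);
}
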
diff --git a/drivers/net/mdio/mdio-mux-bcm6368.c b/drivers/net/mdio/mdio-mux-bcm6368.c
index 6dcbf987d61b..8b444a8eb6b5 100644
--- a/drivers/net/mdio/mdio-mux-bcm6368.c
+++ b/drivers/net/mdio/mdio-mux-bcm6368.c
@@ -115,7 +115,7 @@ static int bcm6368_mdiomux_probe(struct platform_device *pdev)
md->mii_bus = devm_mdiobus_alloc(&pdev->dev);
if (!md->mii_bus) {
dev_err(&pdev->dev, "mdiomux bus alloc failed\n");
- return ENOMEM;
+ return -ENOMEM;
}
bus = md->mii_bus;
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index b6fea119fe13..2b7d0720720b 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -880,7 +880,7 @@ static int mv3310_read_status_copper(struct phy_device *phydev)
cssr1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSSR1);
if (cssr1 < 0)
- return val;
+ return cssr1;
/* If the link settings are not resolved, mark the link down */
if (!(cssr1 & MV_PCS_CSSR1_RESOLVED)) {
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 19b11e896460..cd9aa353b653 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -99,15 +99,6 @@
#define PTP_TIMESTAMP_EN_PDREQ_ BIT(2)
#define PTP_TIMESTAMP_EN_PDRES_ BIT(3)
-#define PTP_RX_LATENCY_1000 0x0224
-#define PTP_TX_LATENCY_1000 0x0225
-
-#define PTP_RX_LATENCY_100 0x0222
-#define PTP_TX_LATENCY_100 0x0223
-
-#define PTP_RX_LATENCY_10 0x0220
-#define PTP_TX_LATENCY_10 0x0221
-
#define PTP_TX_PARSE_L2_ADDR_EN 0x0284
#define PTP_RX_PARSE_L2_ADDR_EN 0x0244
@@ -268,15 +259,6 @@ struct lan8814_ptp_rx_ts {
u16 seq_id;
};
-struct kszphy_latencies {
- u16 rx_10;
- u16 tx_10;
- u16 rx_100;
- u16 tx_100;
- u16 rx_1000;
- u16 tx_1000;
-};
-
struct kszphy_ptp_priv {
struct mii_timestamper mii_ts;
struct phy_device *phydev;
@@ -296,7 +278,6 @@ struct kszphy_ptp_priv {
struct kszphy_priv {
struct kszphy_ptp_priv ptp_priv;
- struct kszphy_latencies latencies;
const struct kszphy_type *type;
int led_mode;
bool rmii_ref_clk_sel;
@@ -304,14 +285,6 @@ struct kszphy_priv {
u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
};
-static struct kszphy_latencies lan8814_latencies = {
- .rx_10 = 0x22AA,
- .tx_10 = 0x2E4A,
- .rx_100 = 0x092A,
- .tx_100 = 0x02C1,
- .rx_1000 = 0x01AD,
- .tx_1000 = 0x00C9,
-};
static const struct kszphy_type ksz8021_type = {
.led_mode_reg = MII_KSZPHY_CTRL_2,
.has_broadcast_disable = true,
@@ -1770,7 +1743,7 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
{
- u32 data;
+ int data;
phy_lock_mdio_bus(phydev);
__phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
@@ -2471,8 +2444,7 @@ static int lan8804_config_init(struct phy_device *phydev)
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
{
- u16 tsu_irq_status;
- int irq_status;
+ int irq_status, tsu_irq_status;
irq_status = phy_read(phydev, LAN8814_INTS);
if (irq_status > 0 && (irq_status & LAN8814_INT_LINK))
@@ -2618,55 +2590,6 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
return 0;
}
-static int lan8814_read_status(struct phy_device *phydev)
-{
- struct kszphy_priv *priv = phydev->priv;
- struct kszphy_latencies *latencies = &priv->latencies;
- int err;
- int regval;
-
- err = genphy_read_status(phydev);
- if (err)
- return err;
-
- switch (phydev->speed) {
- case SPEED_1000:
- lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_1000,
- latencies->rx_1000);
- lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_1000,
- latencies->tx_1000);
- break;
- case SPEED_100:
- lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_100,
- latencies->rx_100);
- lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_100,
- latencies->tx_100);
- break;
- case SPEED_10:
- lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_10,
- latencies->rx_10);
- lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_10,
- latencies->tx_10);
- break;
- default:
- break;
- }
-
- /* Make sure the PHY is not broken. Read idle error count,
- * and reset the PHY if it is maxed out.
- */
- regval = phy_read(phydev, MII_STAT1000);
- if ((regval & 0xFF) == 0xFF) {
- phy_init_hw(phydev);
- phydev->link = 0;
- if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
- phydev->drv->config_intr(phydev);
- return genphy_config_aneg(phydev);
- }
-
- return 0;
-}
-
static int lan8814_config_init(struct phy_device *phydev)
{
int val;
@@ -2690,30 +2613,8 @@ static int lan8814_config_init(struct phy_device *phydev)
return 0;
}
-static void lan8814_parse_latency(struct phy_device *phydev)
-{
- const struct device_node *np = phydev->mdio.dev.of_node;
- struct kszphy_priv *priv = phydev->priv;
- struct kszphy_latencies *latency = &priv->latencies;
- u32 val;
-
- if (!of_property_read_u32(np, "lan8814,latency_rx_10", &val))
- latency->rx_10 = val;
- if (!of_property_read_u32(np, "lan8814,latency_tx_10", &val))
- latency->tx_10 = val;
- if (!of_property_read_u32(np, "lan8814,latency_rx_100", &val))
- latency->rx_100 = val;
- if (!of_property_read_u32(np, "lan8814,latency_tx_100", &val))
- latency->tx_100 = val;
- if (!of_property_read_u32(np, "lan8814,latency_rx_1000", &val))
- latency->rx_1000 = val;
- if (!of_property_read_u32(np, "lan8814,latency_tx_1000", &val))
- latency->tx_1000 = val;
-}
-
static int lan8814_probe(struct phy_device *phydev)
{
- const struct device_node *np = phydev->mdio.dev.of_node;
struct kszphy_priv *priv;
u16 addr;
int err;
@@ -2724,13 +2625,10 @@ static int lan8814_probe(struct phy_device *phydev)
priv->led_mode = -1;
- priv->latencies = lan8814_latencies;
-
phydev->priv = priv;
if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
- !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING) ||
- of_property_read_bool(np, "lan8814,ignore-ts"))
+ !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
return 0;
/* Strap-in value for PHY address, below register read gives starting
@@ -2746,7 +2644,6 @@ static int lan8814_probe(struct phy_device *phydev)
return err;
}
- lan8814_parse_latency(phydev);
lan8814_ptp_init(phydev);
return 0;
@@ -2759,6 +2656,7 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KS8737",
/* PHY_BASIC_FEATURES */
.driver_data = &ks8737_type,
+ .probe = kszphy_probe,
.config_init = kszphy_config_init,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
@@ -2884,8 +2782,8 @@ static struct phy_driver ksphy_driver[] = {
.config_init = ksz8061_config_init,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
- .suspend = kszphy_suspend,
- .resume = kszphy_resume,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
@@ -2928,7 +2826,7 @@ static struct phy_driver ksphy_driver[] = {
.config_init = lan8814_config_init,
.probe = lan8814_probe,
.soft_reset = genphy_soft_reset,
- .read_status = lan8814_read_status,
+ .read_status = ksz9031_read_status,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index 389df3f4293c..c2c0e361fd3d 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -706,7 +706,6 @@ static int lan87xx_read_status(struct phy_device *phydev)
static int lan87xx_config_aneg(struct phy_device *phydev)
{
u16 ctl = 0;
- int rc;
switch (phydev->master_slave_set) {
case MASTER_SLAVE_CFG_MASTER_FORCE:
@@ -722,11 +721,7 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
return -EOPNOTSUPP;
}
- rc = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
- if (rc == 1)
- rc = genphy_soft_reset(phydev);
-
- return rc;
+ return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
}
static struct phy_driver microchip_t1_phy_driver[] = {
@@ -748,6 +743,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_LAN937X),
.name = "Microchip LAN937x T1",
+ .flags = PHY_POLL_CABLE_TEST,
.features = PHY_BASIC_T1_FEATURES,
.config_init = lan87xx_config_init,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index beb2b66da132..f122026c4682 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -970,8 +970,13 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
struct phy_device *phydev = phy_dat;
struct phy_driver *drv = phydev->drv;
+ irqreturn_t ret;
- return drv->handle_interrupt(phydev);
+ mutex_lock(&phydev->lock);
+ ret = drv->handle_interrupt(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
}
/**
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 4dfb79807823..9a5d5a10560f 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -250,6 +250,7 @@ struct sfp {
struct sfp_eeprom_id id;
unsigned int module_power_mW;
unsigned int module_t_start_up;
+ bool tx_fault_ignore;
#if IS_ENABLED(CONFIG_HWMON)
struct sfp_diag diag;
@@ -1956,6 +1957,12 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
else
sfp->module_t_start_up = T_START_UP;
+ if (!memcmp(id.base.vendor_name, "HUAWEI          ", 16) &&
+ !memcmp(id.base.vendor_pn, "MA5671A         ", 16))
+ sfp->tx_fault_ignore = true;
+ else
+ sfp->tx_fault_ignore = false;
+
return 0;
}
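The vendor match works because SFP EEPROM ID strings are fixed-width, space-padded fields rather than NUL-terminated strings, so quirk detection compares the full field width with memcmp(). A minimal sketch (sfp_id_matches is a hypothetical helper):

static bool sfp_id_matches(const char *field, const char *pattern, size_t len)
{
	/* fixed-width compare; both sides are space-padded to len bytes */
	return memcmp(field, pattern, len) == 0;
}

/* e.g. sfp_id_matches(id.base.vendor_name, "HUAWEI          ", 16) */
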
@@ -2409,7 +2416,10 @@ static void sfp_check_state(struct sfp *sfp)
mutex_lock(&sfp->st_mutex);
state = sfp_get_state(sfp);
changed = state ^ sfp->state;
- changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT;
+ if (sfp->tx_fault_ignore)
+ changed &= SFP_F_PRESENT | SFP_F_LOS;
+ else
+ changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT;
for (i = 0; i < GPIO_MAX; i++)
if (changed & BIT(i))
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 3619520340b7..e172743948ed 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -988,6 +988,7 @@ static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
path->encap.proto = htons(ETH_P_PPP_SES);
path->encap.id = be16_to_cpu(po->num);
memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
+ memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN);
path->dev = ctx->dev;
ctx->dev = dev;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 88396ff99f03..6865d32270e5 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -469,7 +469,7 @@ static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
spin_lock(&sl->lock);
if (netif_queue_stopped(dev)) {
- if (!netif_running(dev))
+ if (!netif_running(dev) || !sl->tty)
goto out;
/* May be we must check transmitter timeout here ?
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 276a0e42ca8e..dbe4c0a4be2c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1124,7 +1124,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* NETIF_F_LLTX requires to do our own update of trans_start */
queue = netdev_get_tx_queue(dev, txq);
- queue->trans_start = jiffies;
+ txq_trans_cond_update(queue);
/* Notify and wake up reader process */
if (tfile->flags & TUN_FASYNC)
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index ea06d10e1c21..ca409d450a29 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1102,10 +1102,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (start_of_descs != desc_offset)
goto err;
- /* self check desc_offset from header*/
- if (desc_offset >= skb_len)
+ /* sanity-check desc_offset from the header and make sure the
+ * bounds of the metadata array are inside the SKB
+ */
+ if (pkt_count * 2 + desc_offset >= skb_len)
goto err;
+ /* Packets must not overlap the metadata array */
+ skb_trim(skb, desc_offset);
+
if (pkt_count == 0)
goto err;
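The strengthened check accounts for the whole metadata array: with one 2-byte descriptor per packet starting at desc_offset, the array's last byte must still lie inside the buffer before any descriptor is dereferenced. A minimal sketch of the bound (hypothetical helper name; the widening cast guards against overflow):

static bool rx_meta_in_bounds(u32 desc_offset, u32 pkt_count, u32 skb_len)
{
	/* one u16 descriptor per packet, laid out from desc_offset */
	return (u64)pkt_count * 2 + desc_offset < skb_len;
}
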
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 1b5714926d81..eb0121a64d6d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -320,7 +320,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
rcv = rcu_dereference(priv->peer);
- if (unlikely(!rcv)) {
+ if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
kfree_skb(skb);
goto drop;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 87838cbe38cf..cbba9d2e8f32 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1005,6 +1005,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
* xdp.data_meta were adjusted
*/
len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
+
+ /* Recalculate the headroom if xdp.data or xdp.data_meta were
+ * adjusted by the XDP program. offset always points at the start
+ * of the bytes reserved for the virtio_net header, which xdp.data
+ * follows, so offset equals the headroom (plus a base offset when
+ * buf does not start at the beginning of the page). However,
+ * offset is measured from buf start, whereas xdp.data starts at
+ * buf start + vnet hdr size. If the program moved xdp.data or
+ * data_meta, the headroom has changed along with offset, so
+ * recompute it from data_hard_start, which points at buf start +
+ * vnet hdr size, and use it later to compute buf start in
+ * page_to_skb()
+ */
+ headroom = xdp.data - xdp.data_hard_start - metasize;
+
/* We can only create skb based on xdp_page. */
if (unlikely(xdp_page != page)) {
rcu_read_unlock();
@@ -1012,7 +1030,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
head_skb = page_to_skb(vi, rq, xdp_page, offset,
len, PAGE_SIZE, false,
metasize,
- VIRTIO_XDP_HEADROOM);
+ headroom);
return head_skb;
}
break;
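In short, once the XDP program may have moved data or data_meta, the headroom passed to page_to_skb() has to be recomputed from the fixed data_hard_start anchor instead of being assumed to still be VIRTIO_XDP_HEADROOM. A minimal sketch of the calculation (hypothetical helper name):

static inline unsigned int xdp_headroom_after_run(const struct xdp_buff *xdp,
						  unsigned int metasize)
{
	/* data_hard_start is fixed; data may have moved forward or back */
	return xdp->data - xdp->data_hard_start - metasize;
}
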
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d9d90baac72a..93e8d119d45f 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -589,6 +589,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
dev_kfree_skb_any(rbi->skb);
+ rbi->skb = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@@ -613,6 +614,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
put_page(rbi->page);
+ rbi->page = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@@ -1666,6 +1668,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
u32 i, ring_idx;
struct Vmxnet3_RxDesc *rxd;
+ /* ring has already been cleaned up */
+ if (!rq->rx_ring[0].base)
+ return;
+
for (ring_idx = 0; ring_idx < 2; ring_idx++) {
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 85e362461d71..cfc30ce4c6e1 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1265,6 +1265,7 @@ static int vrf_prepare_mac_header(struct sk_buff *skb,
eth = (struct ethhdr *)skb->data;
skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
/* we set the ethernet destination and the source addresses to the
* address of the VRF device.
@@ -1294,9 +1295,9 @@ static int vrf_prepare_mac_header(struct sk_buff *skb,
*/
static int vrf_add_mac_header_if_unset(struct sk_buff *skb,
struct net_device *vrf_dev,
- u16 proto)
+ u16 proto, struct net_device *orig_dev)
{
- if (skb_mac_header_was_set(skb))
+ if (skb_mac_header_was_set(skb) && dev_has_header(orig_dev))
return 0;
return vrf_prepare_mac_header(skb, vrf_dev, proto);
@@ -1402,6 +1403,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
/* if packet is NDISC then keep the ingress interface */
if (!is_ndisc) {
+ struct net_device *orig_dev = skb->dev;
+
vrf_rx_stats(vrf_dev, skb->len);
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
@@ -1410,7 +1413,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
int err;
err = vrf_add_mac_header_if_unset(skb, vrf_dev,
- ETH_P_IPV6);
+ ETH_P_IPV6,
+ orig_dev);
if (likely(!err)) {
skb_push(skb, skb->mac_len);
dev_queue_xmit_nit(skb, vrf_dev);
@@ -1440,6 +1444,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
struct sk_buff *skb)
{
+ struct net_device *orig_dev = skb->dev;
+
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
IPCB(skb)->flags |= IPSKB_L3SLAVE;
@@ -1460,7 +1466,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
if (!list_empty(&vrf_dev->ptype_all)) {
int err;
- err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP);
+ err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP,
+ orig_dev);
if (likely(!err)) {
skb_push(skb, skb->mac_len);
dev_queue_xmit_nit(skb, vrf_dev);
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index de97ff98d36e..8a5e3a6d32d7 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -651,11 +651,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
if (rd == NULL)
- return -ENOBUFS;
+ return -ENOMEM;
if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
kfree(rd);
- return -ENOBUFS;
+ return -ENOMEM;
}
rd->remote_ip = *ip;
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 23d2954d9747..1e5672019922 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -349,7 +349,7 @@ static int __init cosa_init(void)
}
} else {
cosa_major = register_chrdev(0, "cosa", &cosa_fops);
- if (!cosa_major) {
+ if (cosa_major < 0) {
pr_warn("unable to register chardev\n");
err = -EIO;
goto out;
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 0fad1331303c..aa9a7a5970fd 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -19,6 +19,7 @@
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/suspend.h>
+#include <net/dst_metadata.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
@@ -167,7 +168,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
goto err_peer;
}
- mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+ mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
__skb_queue_head_init(&packets);
if (!skb_is_gso(skb)) {
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 63e1c2d783c5..73693c66cef1 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1633,7 +1633,7 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar)
return;
}
- ret = mmc_hw_reset(ar_sdio->func->card->host);
+ ret = mmc_hw_reset(ar_sdio->func->card);
if (ret)
ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 71eb7d04c3bf..90a5df1fbdbd 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -1288,6 +1288,7 @@ static void ath11k_core_restart(struct work_struct *work)
ieee80211_stop_queues(ar->hw);
ath11k_mac_drain_tx(ar);
+ complete(&ar->completed_11d_scan);
complete(&ar->scan.started);
complete(&ar->scan.completed);
complete(&ar->peer_assoc_done);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index c0228e91a596..b8634eddf49a 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -38,6 +38,8 @@
extern unsigned int ath11k_frame_mode;
+#define ATH11K_SCAN_TIMEOUT_HZ (20 * HZ)
+
#define ATH11K_MON_TIMER_INTERVAL 10
enum ath11k_supported_bw {
@@ -189,6 +191,12 @@ enum ath11k_scan_state {
ATH11K_SCAN_ABORTING,
};
+enum ath11k_11d_state {
+ ATH11K_11D_IDLE,
+ ATH11K_11D_PREPARING,
+ ATH11K_11D_RUNNING,
+};
+
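The three states replace the old pending_11d flag and pair with the completed_11d_scan completion: IDLE to PREPARING when a station vif is added, PREPARING to RUNNING once the firmware accepts the 11d scan, and every path back to IDLE signals waiters. A minimal sketch of that invariant as a hypothetical helper (the patch open-codes these two lines at each transition):

static void ath11k_11d_set_idle(struct ath11k *ar)
{
	/* returning to IDLE must always wake anyone waiting on the scan */
	ar->state_11d = ATH11K_11D_IDLE;
	complete(&ar->completed_11d_scan);
}
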
enum ath11k_dev_flags {
ATH11K_CAC_RUNNING,
ATH11K_FLAG_CORE_REGISTERED,
@@ -607,9 +615,8 @@ struct ath11k {
bool dfs_block_radar_events;
struct ath11k_thermal thermal;
u32 vdev_id_11d_scan;
- struct completion finish_11d_scan;
- struct completion finish_11d_ch_list;
- bool pending_11d;
+ struct completion completed_11d_scan;
+ enum ath11k_11d_state state_11d;
bool regdom_set_by_user;
int hw_rate_code;
u8 twt_enabled;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index d5b83f90d27a..58ff761393db 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -3136,6 +3136,20 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
arvif->do_not_send_tmpl = true;
else
arvif->do_not_send_tmpl = false;
+
+ if (vif->bss_conf.he_support) {
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_BA_MODE,
+ WMI_BA_MODE_BUFFER_SIZE_256);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set BA BUFFER SIZE 256 for VDEV: %d\n",
+ arvif->vdev_id);
+ }
}
if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
@@ -3171,14 +3185,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (arvif->is_up && vif->bss_conf.he_support &&
vif->bss_conf.he_oper.params) {
- ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_BA_MODE,
- WMI_BA_MODE_BUFFER_SIZE_256);
- if (ret)
- ath11k_warn(ar->ab,
- "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
- arvif->vdev_id);
-
param_id = WMI_VDEV_PARAM_HEOPS_0_31;
param_value = vif->bss_conf.he_oper.params;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
@@ -3595,26 +3601,6 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
if (ret)
goto exit;
- /* Currently the pending_11d=true only happened 1 time while
- * wlan interface up in ath11k_mac_11d_scan_start(), it is called by
- * ath11k_mac_op_add_interface(), after wlan interface up,
- * pending_11d=false always.
- * If remove below wait, it always happened scan fail and lead connect
- * fail while wlan interface up, because it has a 11d scan which is running
- * in firmware, and lead this scan failed.
- */
- if (ar->pending_11d) {
- long time_left;
- unsigned long timeout = 5 * HZ;
-
- if (ar->supports_6ghz)
- timeout += 5 * HZ;
-
- time_left = wait_for_completion_timeout(&ar->finish_11d_ch_list, timeout);
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "mac wait 11d channel list time left %ld\n", time_left);
- }
-
memset(&arg, 0, sizeof(arg));
ath11k_wmi_start_scan_init(ar, &arg);
arg.vdev_id = arvif->vdev_id;
@@ -3680,6 +3666,10 @@ exit:
kfree(arg.extraie.ptr);
mutex_unlock(&ar->conf_mutex);
+
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
+
return ret;
}
@@ -5808,7 +5798,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
/* TODO: Do we need to enable ANI? */
- ath11k_reg_update_chan_list(ar);
+ ath11k_reg_update_chan_list(ar, false);
ar->num_started_vdevs = 0;
ar->num_created_vdevs = 0;
@@ -5875,6 +5865,11 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
cancel_work_sync(&ar->ab->update_11d_work);
cancel_work_sync(&ar->ab->rfkill_work);
+ if (ar->state_11d == ATH11K_11D_PREPARING) {
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
spin_lock_bh(&ar->data_lock);
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
list_del(&ppdu_stats->list);
@@ -6045,7 +6040,7 @@ static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab)
return false;
}
-void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id)
{
struct wmi_11d_scan_start_params param;
int ret;
@@ -6073,28 +6068,22 @@ void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac start 11d scan\n");
- if (wait)
- reinit_completion(&ar->finish_11d_scan);
-
ret = ath11k_wmi_send_11d_scan_start_cmd(ar, &param);
if (ret) {
ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n",
vdev_id, ret);
} else {
ar->vdev_id_11d_scan = vdev_id;
- if (wait) {
- ar->pending_11d = true;
- ret = wait_for_completion_timeout(&ar->finish_11d_scan,
- 5 * HZ);
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "mac 11d scan left time %d\n", ret);
-
- if (!ret)
- ar->pending_11d = false;
- }
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ ar->state_11d = ATH11K_11D_RUNNING;
}
fin:
+ if (ar->state_11d == ATH11K_11D_PREPARING) {
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
mutex_unlock(&ar->ab->vdev_id_11d_lock);
}
@@ -6117,12 +6106,15 @@ void ath11k_mac_11d_scan_stop(struct ath11k *ar)
vdev_id = ar->vdev_id_11d_scan;
ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id);
- if (ret)
+ if (ret) {
ath11k_warn(ar->ab,
"failed to stopt 11d scan vdev %d ret: %d\n",
vdev_id, ret);
- else
+ } else {
ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
}
mutex_unlock(&ar->ab->vdev_id_11d_lock);
}
@@ -6318,8 +6310,10 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err_peer_del;
}
- ath11k_mac_11d_scan_start(ar, arvif->vdev_id, true);
-
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map)) {
+ reinit_completion(&ar->completed_11d_scan);
+ ar->state_11d = ATH11K_11D_PREPARING;
+ }
break;
case WMI_VDEV_TYPE_MONITOR:
set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
@@ -7184,7 +7178,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
- ath11k_mac_11d_scan_start(ar, arvif->vdev_id, false);
+ ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
mutex_unlock(&ar->conf_mutex);
}
@@ -8665,8 +8659,7 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
ar->monitor_vdev_id = -1;
clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
- init_completion(&ar->finish_11d_scan);
- init_completion(&ar->finish_11d_ch_list);
+ init_completion(&ar->completed_11d_scan);
}
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 0e6c870b09c8..29b523af66dd 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -130,7 +130,7 @@ extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
#define ATH11K_SCAN_11D_INTERVAL 600000
#define ATH11K_11D_INVALID_VDEV_ID 0xFFFF
-void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait);
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id);
void ath11k_mac_11d_scan_stop(struct ath11k *ar);
void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab);
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index 81e11cde31d7..80a697771393 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -102,7 +102,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
ar->regdom_set_by_user = true;
}
-int ath11k_reg_update_chan_list(struct ath11k *ar)
+int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
{
struct ieee80211_supported_band **bands;
struct scan_chan_list_params *params;
@@ -111,7 +111,32 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
struct channel_param *ch;
enum nl80211_band band;
int num_channels = 0;
- int i, ret;
+ int i, ret, left;
+
+ if (wait && ar->state_11d != ATH11K_11D_IDLE) {
+ left = wait_for_completion_timeout(&ar->completed_11d_scan,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive 11d scan complete: timed out\n");
+ ar->state_11d = ATH11K_11D_IDLE;
+ }
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "reg 11d scan wait left time %d\n", left);
+ }
+
+ if (wait &&
+ (ar->scan.state == ATH11K_SCAN_STARTING ||
+ ar->scan.state == ATH11K_SCAN_RUNNING)) {
+ left = wait_for_completion_timeout(&ar->scan.completed,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left)
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive hw scan complete: timed out\n");
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "reg hw scan wait left time %d\n", left);
+ }
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
@@ -193,11 +218,6 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
kfree(params);
- if (ar->pending_11d) {
- complete(&ar->finish_11d_ch_list);
- ar->pending_11d = false;
- }
-
return ret;
}
@@ -263,15 +283,8 @@ int ath11k_regd_update(struct ath11k *ar)
goto err;
}
- if (ar->pending_11d)
- complete(&ar->finish_11d_scan);
-
rtnl_lock();
wiphy_lock(ar->hw->wiphy);
-
- if (ar->pending_11d)
- reinit_completion(&ar->finish_11d_ch_list);
-
ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
wiphy_unlock(ar->hw->wiphy);
rtnl_unlock();
@@ -282,7 +295,7 @@ int ath11k_regd_update(struct ath11k *ar)
goto err;
if (ar->state == ATH11K_STATE_ON) {
- ret = ath11k_reg_update_chan_list(ar);
+ ret = ath11k_reg_update_chan_list(ar, true);
if (ret)
goto err;
}
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index 5fb9dc03a74e..2f284f26378d 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -32,5 +32,5 @@ struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect);
int ath11k_regd_update(struct ath11k *ar);
-int ath11k_reg_update_chan_list(struct ath11k *ar);
+int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index b4f86c45d81f..2751fe8814df 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -2015,7 +2015,10 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
{
/* setup commonly used values */
arg->scan_req_id = 1;
- arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
+ else
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
arg->dwell_time_active = 50;
arg->dwell_time_active_2g = 0;
arg->dwell_time_passive = 150;
@@ -6350,8 +6353,10 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
{
const struct wmi_11d_new_cc_ev *ev;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
const void **tb;
- int ret;
+ int ret, i;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
@@ -6377,6 +6382,13 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *s
kfree(tb);
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
queue_work(ab->workqueue, &ab->update_11d_work);
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 98090e40e1cf..e2791d45f5f5 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
continue;
txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
- fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
+ fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0];
if (fi->keyix == keyix)
return true;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index d0caf1de2bde..db83cc4ba810 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
BUILD_BUG_ON(sizeof(struct ath_frame_info) >
- sizeof(tx_info->rate_driver_data));
- return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
+ sizeof(tx_info->status.status_driver_data));
+ return (struct ath_frame_info *) &tx_info->status.status_driver_data[0];
}
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
@@ -2542,6 +2542,16 @@ skip_tx_complete:
spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
+static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info)
+{
+ void *ptr = &tx_info->status;
+
+ memset(ptr + sizeof(tx_info->status.rates), 0,
+ sizeof(tx_info->status) -
+ sizeof(tx_info->status.rates) -
+ sizeof(tx_info->status.status_driver_data));
+}
+
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
int txok)
@@ -2553,6 +2563,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_hw *ah = sc->sc_ah;
u8 i, tx_rateindex;
+ ath_clear_tx_status(tx_info);
+
if (txok)
tx_info->status.ack_signal = ts->ts_rssi;
@@ -2567,6 +2579,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.ampdu_len = nframes;
tx_info->status.ampdu_ack_len = nframes - nbad;
+ tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
+
+ for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
+ tx_info->status.rates[i].count = 0;
+ tx_info->status.rates[i].idx = -1;
+ }
+
if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
(tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
/*
@@ -2588,16 +2607,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.rates[tx_rateindex].count =
hw->max_rate_tries;
}
-
- for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
- tx_info->status.rates[i].count = 0;
- tx_info->status.rates[i].idx = -1;
- }
-
- tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
-
- /* we report airtime in ath_tx_count_airtime(), don't report twice */
- tx_info->status.tx_time = 0;
}
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
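ath_clear_tx_status() above relies on the layout of the tx_info->status struct: rates[] is the first member and status_driver_data[] (which now carries struct ath_frame_info) is the last, so zeroing from the end of rates[] up to the start of the driver data wipes exactly the middle fields. A standalone model of the same arithmetic, with a hypothetical struct (using GCC's void-pointer arithmetic, as the kernel does):

#include <string.h>

struct model_status {
	int  rates[4];		/* preserved: stands in for status.rates */
	int  ack_signal;	/* cleared */
	int  ampdu_len;		/* cleared */
	long driver_data[2];	/* preserved: must be the last member */
};

static void clear_middle(struct model_status *s)
{
	void *p = s;

	memset(p + sizeof(s->rates), 0,
	       sizeof(*s) - sizeof(s->rates) - sizeof(s->driver_data));
}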
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index ba3c159111d3..212fbbe1cd7e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_SUB,
};
-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu))
/* SDIO Pad drive strength to select value mappings */
struct sdiod_drive_str {
@@ -4165,7 +4165,7 @@ static int brcmf_sdio_bus_reset(struct device *dev)
/* reset the adapter */
sdio_claim_host(sdiodev->func1);
- mmc_hw_reset(sdiodev->func1->card->host);
+ mmc_hw_reset(sdiodev->func1->card);
sdio_release_host(sdiodev->func1);
brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
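This and the mwifiex/wlcore hunks below track an mmc core API change: mmc_hw_reset() now takes the struct mmc_card rather than the struct mmc_host. The typical SDIO call site becomes:

/* old: mmc_hw_reset(func->card->host); */
sdio_claim_host(func);
mmc_hw_reset(func->card);	/* new: pass the card itself */
sdio_release_host(func);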
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 866a33f49915..3237d4b528b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -371,7 +371,7 @@ void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
struct iwl_dbg_tlv_timer_node *node, *tmp;
list_for_each_entry_safe(node, tmp, timer_list, list) {
- del_timer(&node->timer);
+ del_timer_sync(&node->timer);
list_del(&node->list);
kfree(node);
}
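del_timer() only deactivates a pending timer; if the callback is already running on another CPU it keeps running, so the kfree(node) that follows could free memory the handler still uses. del_timer_sync() waits for any in-flight handler to finish, which is what makes the teardown above safe. In outline:

/* Racy: the callback may still be executing elsewhere. */
del_timer(&node->timer);
kfree(node);			/* potential use-after-free */

/* Safe: returns only once no handler is running anywhere. */
del_timer_sync(&node->timer);
kfree(node);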
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 28bfa7b7b73c..e9ec63e0e395 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2202,11 +2202,14 @@ mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw,
if (!data->use_chanctx) {
confbw = data->bw;
} else {
- struct ieee80211_chanctx_conf *chanctx_conf =
- rcu_dereference(vif->chanctx_conf);
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
if (!WARN_ON(!chanctx_conf))
confbw = chanctx_conf->def.width;
+ rcu_read_unlock();
}
WARN(bw > hwsim_get_chanwidth(confbw),
@@ -2475,11 +2478,13 @@ static void hw_scan_work(struct work_struct *work)
if (req->ie_len)
skb_put_data(probe, req->ie, req->ie_len);
+ rcu_read_lock();
if (!ieee80211_tx_prepare_skb(hwsim->hw,
hwsim->hw_scan_vif,
probe,
hwsim->tmp_chan->band,
NULL)) {
+ rcu_read_unlock();
kfree_skb(probe);
continue;
}
@@ -2487,6 +2492,7 @@ static void hw_scan_work(struct work_struct *work)
local_bh_disable();
mac80211_hwsim_tx_frame(hwsim->hw, probe,
hwsim->tmp_chan);
+ rcu_read_unlock();
local_bh_enable();
}
}
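Both hwsim fixes follow the basic RCU read-side rule: a pointer obtained with rcu_dereference() is only guaranteed valid between rcu_read_lock() and rcu_read_unlock(), so the lock must cover every use of the pointer (in hw_scan_work(), up to and including the frame transmission). The canonical shape:

rcu_read_lock();
conf = rcu_dereference(vif->chanctx_conf);	/* valid only in this section */
if (conf)
	width = conf->def.width;		/* use while still locked */
rcu_read_unlock();
/* 'conf' must not be dereferenced past this point */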
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index bde9e4bbfffe..4f3238d2a171 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -2639,7 +2639,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
/* Run a HW reset of the SDIO interface. */
sdio_claim_host(func);
- ret = mmc_hw_reset(func->card->host);
+ ret = mmc_hw_reset(func->card);
sdio_release_host(func);
switch (ret) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 8a22ee581674..df85ebc6e1df 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
/* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
- mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+ mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
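The U suffix here (and in the SDIOD_DRVSTR_KEY and CNTL_CSV_MASK hunks elsewhere in this patch) fixes the same class of bug: shifting a signed int constant into the sign bit is undefined behaviour in C. For example:

int bad           = 0xf  << 28;	/* UB: 0xf is a signed int, a bit lands in the sign position */
unsigned int good = 0xfU << 28;	/* well-defined: 0xf0000000 */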
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 72fc41ac83c0..9140b0163474 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -146,7 +146,7 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
* To guarantee that the SDIO card is power cycled, as required for
* the FW programming to succeed, let's do a brute-force HW reset.
*/
- mmc_hw_reset(card->host);
+ mmc_hw_reset(card);
sdio_enable_func(func);
sdio_release_host(func);
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index 2fcf545012b1..1a5284de4341 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -183,6 +183,7 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
{
struct nci_dev *ndev = priv->ndev;
+ nci_unregister_device(ndev);
if (priv->ndev->nfc_dev->fw_download_in_progress)
nfcmrvl_fw_dnld_abort(priv);
@@ -191,7 +192,6 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
if (gpio_is_valid(priv->config.reset_n_io))
gpio_free(priv->config.reset_n_io);
- nci_unregister_device(ndev);
nci_free_device(ndev);
kfree(priv);
}
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index a491db46e3bd..d9f6367b9993 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -2787,13 +2787,14 @@ void pn53x_common_clean(struct pn533 *priv)
{
struct pn533_cmd *cmd, *n;
+	/* delete the timer before cleaning up the worker */
+ del_timer_sync(&priv->listen_timer);
+
flush_delayed_work(&priv->poll_work);
destroy_workqueue(priv->wq);
skb_queue_purge(&priv->resp_q);
- del_timer(&priv->listen_timer);
-
list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) {
list_del(&cmd->queue);
kfree(cmd);
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 7d49eb34b348..4910543f00ff 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -4,7 +4,6 @@
* Copyright (c) 2022, Oracle and/or its affiliates
*/
-#include <linux/blkdev.h>
#include "nvme.h"
#ifdef CONFIG_NVME_VERBOSE_ERRORS
@@ -92,6 +91,7 @@ static const char * const nvme_statuses[] = {
[NVME_SC_NS_WRITE_PROTECTED] = "Namespace is Write Protected",
[NVME_SC_CMD_INTERRUPTED] = "Command Interrupted",
[NVME_SC_TRANSIENT_TR_ERR] = "Transient Transport Error",
+ [NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY] = "Admin Command Media Not Ready",
[NVME_SC_INVALID_IO_CMD_SET] = "Invalid IO Command Set",
[NVME_SC_LBA_RANGE] = "LBA Out of Range",
[NVME_SC_CAP_EXCEEDED] = "Capacity Exceeded",
@@ -155,10 +155,13 @@ static const char * const nvme_statuses[] = {
[NVME_SC_COMPARE_FAILED] = "Compare Failure",
[NVME_SC_ACCESS_DENIED] = "Access Denied",
[NVME_SC_UNWRITTEN_BLOCK] = "Deallocated or Unwritten Logical Block",
+ [NVME_SC_INTERNAL_PATH_ERROR] = "Internal Pathing Error",
[NVME_SC_ANA_PERSISTENT_LOSS] = "Asymmetric Access Persistent Loss",
[NVME_SC_ANA_INACCESSIBLE] = "Asymmetric Access Inaccessible",
[NVME_SC_ANA_TRANSITION] = "Asymmetric Access Transition",
+ [NVME_SC_CTRL_PATH_ERROR] = "Controller Pathing Error",
[NVME_SC_HOST_PATH_ERROR] = "Host Pathing Error",
+ [NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command",
};
const unsigned char *nvme_get_error_status_str(u16 status)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index efb85c6d8e2d..72f7c955c707 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -366,7 +366,7 @@ static inline void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
- if (unlikely(nvme_req(req)->status != NVME_SC_SUCCESS))
+ if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
nvme_log_error(req);
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
@@ -1015,6 +1015,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
+ req->rq_flags |= RQF_QUIET;
ret = nvme_execute_rq(req, at_head);
if (result && ret >= 0)
*result = nvme_req(req)->result;
@@ -1206,6 +1207,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
rq->timeout = ctrl->kato * HZ;
rq->end_io_data = ctrl;
+ rq->rq_flags |= RQF_QUIET;
blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
}
@@ -1287,6 +1289,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
warn_str, cur->nidl);
return -1;
}
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_EUI64_LEN;
memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
return NVME_NIDT_EUI64_LEN;
case NVME_NIDT_NGUID:
@@ -1295,6 +1299,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
warn_str, cur->nidl);
return -1;
}
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_NGUID_LEN;
memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
return NVME_NIDT_NGUID_LEN;
case NVME_NIDT_UUID:
@@ -1303,6 +1309,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
warn_str, cur->nidl);
return -1;
}
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_UUID_LEN;
uuid_copy(&ids->uuid, data + sizeof(*cur));
return NVME_NIDT_UUID_LEN;
case NVME_NIDT_CSI:
@@ -1399,12 +1407,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if ((*id)->ncap == 0) /* namespace not allocated or attached */
goto out_free_id;
- if (ctrl->vs >= NVME_VS(1, 1, 0) &&
- !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
- memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
- if (ctrl->vs >= NVME_VS(1, 2, 0) &&
- !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
- memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
+ dev_info(ctrl->device,
+ "Ignoring bogus Namespace Identifiers\n");
+ } else {
+ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+ !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+ memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+ if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+ }
return 0;
@@ -1413,6 +1427,32 @@ out_free_id:
return error;
}
+static int nvme_identify_ns_cs_indep(struct nvme_ctrl *ctrl, unsigned nsid,
+ struct nvme_id_ns_cs_indep **id)
+{
+ struct nvme_command c = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.nsid = cpu_to_le32(nsid),
+ .identify.cns = NVME_ID_CNS_NS_CS_INDEP,
+ };
+ int ret;
+
+ *id = kmalloc(sizeof(**id), GFP_KERNEL);
+ if (!*id)
+ return -ENOMEM;
+
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
+ if (ret) {
+ dev_warn(ctrl->device,
+ "Identify namespace (CS independent) failed (%d)\n",
+ ret);
+ kfree(*id);
+ return ret;
+ }
+
+ return 0;
+}
+
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
@@ -1608,20 +1648,22 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
u32 size = queue_logical_block_size(queue);
if (ctrl->max_discard_sectors == 0) {
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
+ blk_queue_max_discard_sectors(queue, 0);
return;
}
BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
NVME_DSM_MAX_RANGES);
- queue->limits.discard_alignment = 0;
queue->limits.discard_granularity = size;
/* If discard is already enabled, don't reset queue limits */
- if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
+ if (queue->limits.max_discard_sectors)
return;
+ if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
+ ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
+
blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
@@ -1758,7 +1800,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
- blk_queue_dma_alignment(q, 7);
+ blk_queue_dma_alignment(q, 3);
blk_queue_write_cache(q, vwc, vwc);
}
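blk_queue_dma_alignment() takes a mask, not a byte count: a buffer address (and length) must satisfy (value & mask) == 0. The change from 7 to 3 therefore relaxes the passthrough buffer requirement from 8-byte to 4-byte alignment:

blk_queue_dma_alignment(q, 7);	/* old: 8-byte-aligned buffers required */
blk_queue_dma_alignment(q, 3);	/* new: dword-aligned buffers suffice */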
@@ -2087,10 +2129,9 @@ static const struct block_device_operations nvme_bdev_ops = {
.pr_ops = &nvme_pr_ops,
};
-static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
+static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 timeout, bool enabled)
{
- unsigned long timeout =
- ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+ unsigned long timeout_jiffies = ((timeout + 1) * HZ / 2) + jiffies;
u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
int ret;
@@ -2103,7 +2144,7 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
usleep_range(1000, 2000);
if (fatal_signal_pending(current))
return -EINTR;
- if (time_after(jiffies, timeout)) {
+ if (time_after(jiffies, timeout_jiffies)) {
dev_err(ctrl->device,
"Device not ready; aborting %s, CSTS=0x%x\n",
enabled ? "initialisation" : "reset", csts);
@@ -2134,13 +2175,14 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
msleep(NVME_QUIRK_DELAY_AMOUNT);
- return nvme_wait_ready(ctrl, ctrl->cap, false);
+ return nvme_wait_ready(ctrl, NVME_CAP_TIMEOUT(ctrl->cap), false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{
unsigned dev_page_min;
+ u32 timeout;
int ret;
ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
@@ -2161,6 +2203,27 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
ctrl->ctrl_config = NVME_CC_CSS_CSI;
else
ctrl->ctrl_config = NVME_CC_CSS_NVM;
+
+ if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
+ u32 crto;
+
+ ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
+ if (ret) {
+ dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ if (ctrl->cap & NVME_CAP_CRMS_CRIMS) {
+ ctrl->ctrl_config |= NVME_CC_CRIME;
+ timeout = NVME_CRTO_CRIMT(crto);
+ } else {
+ timeout = NVME_CRTO_CRWMT(crto);
+ }
+ } else {
+ timeout = NVME_CAP_TIMEOUT(ctrl->cap);
+ }
+
ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
@@ -2169,7 +2232,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
if (ret)
return ret;
- return nvme_wait_ready(ctrl, ctrl->cap, true);
+ return nvme_wait_ready(ctrl, timeout, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
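The (timeout + 1) * HZ / 2 arithmetic in nvme_wait_ready() comes from the spec's units: CAP.TO, and with CRMS support the CRTO CRIMT/CRWMT fields, count in 500 ms steps, with a raw value t meaning the controller may take up to (t + 1) * 500 ms to become ready. In jiffies:

/* deadline for CSTS.RDY polling, t in 500 ms units */
unsigned long deadline = jiffies + ((t + 1) * HZ / 2);
/* e.g. t == 3 allows 2 s before "Device not ready" is logged */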
@@ -2881,8 +2944,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
if (id->dmrl)
ctrl->max_discard_segments = id->dmrl;
- if (id->dmrsl)
- ctrl->max_discard_sectors = le32_to_cpu(id->dmrsl);
+ ctrl->dmrsl = le32_to_cpu(id->dmrsl);
if (id->wzsl)
ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
@@ -3067,10 +3129,6 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
if (ret)
return ret;
- ret = nvme_init_non_mdts_limits(ctrl);
- if (ret < 0)
- return ret;
-
ret = nvme_configure_apst(ctrl);
if (ret < 0)
return ret;
@@ -3133,6 +3191,7 @@ static const struct file_operations nvme_dev_fops = {
.release = nvme_dev_release,
.unlocked_ioctl = nvme_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+ .uring_cmd = nvme_dev_uring_cmd,
};
static ssize_t nvme_sysfs_reset(struct device *dev,
@@ -3686,6 +3745,7 @@ static const struct file_operations nvme_ns_chr_fops = {
.release = nvme_ns_chr_release,
.unlocked_ioctl = nvme_ns_chr_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+ .uring_cmd = nvme_ns_chr_uring_cmd,
};
static int nvme_add_ns_cdev(struct nvme_ns *ns)
@@ -4077,11 +4137,26 @@ out:
static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns_ids ids = { };
+ struct nvme_id_ns_cs_indep *id;
struct nvme_ns *ns;
+ bool ready = true;
if (nvme_identify_ns_descs(ctrl, nsid, &ids))
return;
+ /*
+	 * Check if the namespace is ready. If not, ignore it; we will get an
+	 * AEN once it becomes ready and then restart the scan.
+ */
+ if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) &&
+ !nvme_identify_ns_cs_indep(ctrl, nsid, &id)) {
+ ready = id->nstat & NVME_NSTAT_NRDY;
+ kfree(id);
+ }
+
+ if (!ready)
+ return;
+
ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
nvme_validate_ns(ns, &ids);
@@ -4224,11 +4299,26 @@ static void nvme_scan_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl =
container_of(work, struct nvme_ctrl, scan_work);
+ int ret;
/* No tagset on a live ctrl means IO queues could not be created */
if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
return;
+ /*
+	 * The Identify Controller limits can change across a controller reset,
+	 * e.g. due to a new firmware download; although this is uncommon, the
+	 * scenario cannot be ignored. The controller's non-MDTS limits are
+	 * reported in units of logical blocks, which depend on the format of
+	 * the attached namespace. Hence re-read the limits at ns allocation time.
+ */
+ ret = nvme_init_non_mdts_limits(ctrl);
+ if (ret < 0) {
+ dev_warn(ctrl->device,
+ "reading non-mdts-limits failed: %d\n", ret);
+ return;
+ }
+
if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
dev_info(ctrl->device, "rescanning namespaces.\n");
nvme_clear_changed_ns_log(ctrl);
@@ -4826,6 +4916,8 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) !=
+ NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 1e3a09cad961..46d6e194ac2b 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -187,6 +187,14 @@ static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl)
return ctrl->subsys->subnqn;
}
+static inline void nvmf_complete_timed_out_request(struct request *rq)
+{
+ if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
+ nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+ blk_mq_complete_request(rq);
+ }
+}
+
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 080f85f4105f..7ae72c7a211b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3831,6 +3831,9 @@ process_local_list:
return count;
}
+static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
+
+#ifdef CONFIG_BLK_CGROUP_FC_APPID
/* Parse the cgroup id from the buffer and return the length of the cgrpid */
static int fc_parse_cgrpid(const char *buf, u64 *id)
{
@@ -3854,12 +3857,10 @@ static int fc_parse_cgrpid(const char *buf, u64 *id)
}
/*
- * fc_update_appid: Parse and update the appid in the blkcg associated with
- * cgroupid.
- * @buf: buf contains both cgrpid and appid info
- * @count: size of the buffer
+ * Parse and update the appid in the blkcg associated with the cgroupid.
*/
-static int fc_update_appid(const char *buf, size_t count)
+static ssize_t fc_appid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
{
u64 cgrp_id;
int appid_len = 0;
@@ -3887,23 +3888,14 @@ static int fc_update_appid(const char *buf, size_t count)
return ret;
return count;
}
-
-static ssize_t fc_appid_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- int ret = 0;
-
- ret = fc_update_appid(buf, count);
- if (ret < 0)
- return -EINVAL;
- return count;
-}
-static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
+#endif /* CONFIG_BLK_CGROUP_FC_APPID */
static struct attribute *nvme_fc_attrs[] = {
&dev_attr_nvme_discovery.attr,
+#ifdef CONFIG_BLK_CGROUP_FC_APPID
&dev_attr_appid_store.attr,
+#endif
NULL
};
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 554566371ffa..096b1b47d750 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -5,6 +5,7 @@
*/
#include <linux/ptrace.h> /* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
+#include <linux/io_uring.h>
#include "nvme.h"
/*
@@ -53,10 +54,21 @@ out:
return ERR_PTR(ret);
}
-static int nvme_submit_user_cmd(struct request_queue *q,
+static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
+ void *meta, unsigned len, int ret)
+{
+ if (!ret && req_op(req) == REQ_OP_DRV_IN &&
+ copy_to_user(ubuf, meta, len))
+ ret = -EFAULT;
+ kfree(meta);
+ return ret;
+}
+
+static struct request *nvme_alloc_user_request(struct request_queue *q,
struct nvme_command *cmd, void __user *ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, u64 *result, unsigned timeout, bool vec)
+ u32 meta_seed, void **metap, unsigned timeout, bool vec,
+ unsigned int rq_flags, blk_mq_req_flags_t blk_flags)
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
@@ -66,9 +78,9 @@ static int nvme_submit_user_cmd(struct request_queue *q,
void *meta = NULL;
int ret;
- req = blk_mq_alloc_request(q, nvme_req_op(cmd), 0);
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
if (IS_ERR(req))
- return PTR_ERR(req);
+ return req;
nvme_init_request(req, cmd);
if (timeout)
@@ -105,26 +117,50 @@ static int nvme_submit_user_cmd(struct request_queue *q,
goto out_unmap;
}
req->cmd_flags |= REQ_INTEGRITY;
+ *metap = meta;
}
}
+ return req;
+
+out_unmap:
+ if (bio)
+ blk_rq_unmap_user(bio);
+out:
+ blk_mq_free_request(req);
+ return ERR_PTR(ret);
+}
+
+static int nvme_submit_user_cmd(struct request_queue *q,
+ struct nvme_command *cmd, void __user *ubuffer,
+ unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+ u32 meta_seed, u64 *result, unsigned timeout, bool vec)
+{
+ struct request *req;
+ void *meta = NULL;
+ struct bio *bio;
+ int ret;
+
+ req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
+ meta_len, meta_seed, &meta, timeout, vec, 0, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ bio = req->bio;
+
ret = nvme_execute_passthru_rq(req);
+
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
- if (meta && !ret && !write) {
- if (copy_to_user(meta_buffer, meta, meta_len))
- ret = -EFAULT;
- }
- kfree(meta);
- out_unmap:
+ if (meta)
+ ret = nvme_finish_user_metadata(req, meta_buffer, meta,
+ meta_len, ret);
if (bio)
blk_rq_unmap_user(bio);
- out:
blk_mq_free_request(req);
return ret;
}
-
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
struct nvme_user_io io;
@@ -296,6 +332,139 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return status;
}
+struct nvme_uring_data {
+ __u64 metadata;
+ __u64 addr;
+ __u32 data_len;
+ __u32 metadata_len;
+ __u32 timeout_ms;
+};
+
+/*
+ * This overlays struct io_uring_cmd pdu.
+ * Expect build errors if this grows larger than that.
+ */
+struct nvme_uring_cmd_pdu {
+ union {
+ struct bio *bio;
+ struct request *req;
+ };
+ void *meta; /* kernel-resident buffer */
+ void __user *meta_buffer;
+ u32 meta_len;
+};
+
+static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
+ struct io_uring_cmd *ioucmd)
+{
+ return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
+}
+
+static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
+{
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ struct request *req = pdu->req;
+ struct bio *bio = req->bio;
+ int status;
+ u64 result;
+
+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+ status = -EINTR;
+ else
+ status = nvme_req(req)->status;
+
+ result = le64_to_cpu(nvme_req(req)->result.u64);
+
+ if (pdu->meta)
+ status = nvme_finish_user_metadata(req, pdu->meta_buffer,
+ pdu->meta, pdu->meta_len, status);
+ if (bio)
+ blk_rq_unmap_user(bio);
+ blk_mq_free_request(req);
+
+ io_uring_cmd_done(ioucmd, status, result);
+}
+
+static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
+{
+ struct io_uring_cmd *ioucmd = req->end_io_data;
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ /* extract bio before reusing the same field for request */
+ struct bio *bio = pdu->bio;
+
+ pdu->req = req;
+ req->bio = bio;
+	/* this takes care of moving the rest of the completion work to task context */
+ io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+}
+
+static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
+{
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ const struct nvme_uring_cmd *cmd = ioucmd->cmd;
+ struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
+ struct nvme_uring_data d;
+ struct nvme_command c;
+ struct request *req;
+ unsigned int rq_flags = 0;
+ blk_mq_req_flags_t blk_flags = 0;
+ void *meta = NULL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ c.common.opcode = READ_ONCE(cmd->opcode);
+ c.common.flags = READ_ONCE(cmd->flags);
+ if (c.common.flags)
+ return -EINVAL;
+
+ c.common.command_id = 0;
+ c.common.nsid = cpu_to_le32(cmd->nsid);
+ if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
+ return -EINVAL;
+
+ c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
+ c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
+ c.common.metadata = 0;
+ c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
+ c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
+ c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
+ c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
+ c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
+ c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
+ c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));
+
+ d.metadata = READ_ONCE(cmd->metadata);
+ d.addr = READ_ONCE(cmd->addr);
+ d.data_len = READ_ONCE(cmd->data_len);
+ d.metadata_len = READ_ONCE(cmd->metadata_len);
+ d.timeout_ms = READ_ONCE(cmd->timeout_ms);
+
+ if (issue_flags & IO_URING_F_NONBLOCK) {
+ rq_flags = REQ_NOWAIT;
+ blk_flags = BLK_MQ_REQ_NOWAIT;
+ }
+
+ req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
+ d.data_len, nvme_to_user_ptr(d.metadata),
+ d.metadata_len, 0, &meta, d.timeout_ms ?
+ msecs_to_jiffies(d.timeout_ms) : 0, vec, rq_flags,
+ blk_flags);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->end_io_data = ioucmd;
+
+	/* to free the bio on completion, as req->bio will be NULL by then */
+ pdu->bio = req->bio;
+ pdu->meta = meta;
+ pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
+ pdu->meta_len = d.metadata_len;
+
+ blk_execute_rq_nowait(req, 0, nvme_uring_cmd_end_io);
+ return -EIOCBQUEUED;
+}
+
static bool is_ctrl_ioctl(unsigned int cmd)
{
if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
@@ -387,6 +556,53 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return __nvme_ioctl(ns, cmd, (void __user *)arg);
}
+static int nvme_uring_cmd_checks(unsigned int issue_flags)
+{
+ /* IOPOLL not supported yet */
+ if (issue_flags & IO_URING_F_IOPOLL)
+ return -EOPNOTSUPP;
+
+ /* NVMe passthrough requires big SQE/CQE support */
+ if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
+ (IO_URING_F_SQE128|IO_URING_F_CQE32))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));
+
+ ret = nvme_uring_cmd_checks(issue_flags);
+ if (ret)
+ return ret;
+
+ switch (ioucmd->cmd_op) {
+ case NVME_URING_CMD_IO:
+ ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
+ break;
+ case NVME_URING_CMD_IO_VEC:
+ ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+ struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
+ struct nvme_ns, cdev);
+
+ return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
+}
+
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
void __user *argp, struct nvme_ns_head *head, int srcu_idx)
@@ -453,8 +669,46 @@ out_unlock:
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
+
+int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags)
+{
+ struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
+ struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
+ int srcu_idx = srcu_read_lock(&head->srcu);
+ struct nvme_ns *ns = nvme_find_path(head);
+ int ret = -EINVAL;
+
+ if (ns)
+ ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
#endif /* CONFIG_NVME_MULTIPATH */
+int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+ struct nvme_ctrl *ctrl = ioucmd->file->private_data;
+ int ret;
+
+ ret = nvme_uring_cmd_checks(issue_flags);
+ if (ret)
+ return ret;
+
+ switch (ioucmd->cmd_op) {
+ case NVME_URING_CMD_ADMIN:
+ ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
+ break;
+ case NVME_URING_CMD_ADMIN_VEC:
+ ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
struct nvme_ns *ns;
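For reference, a hedged userspace sketch of driving the new ->uring_cmd path. It assumes liburing plus the uapi struct nvme_uring_cmd and NVME_URING_CMD_IO definitions that accompany this interface; the character-device path, namespace ID, and the omitted SLBA/NLB dwords are placeholders, not values from this patch.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <liburing.h>
#include <linux/nvme_ioctl.h>

/* Submit one NVMe Read through IORING_OP_URING_CMD. Big SQEs/CQEs are
 * mandatory; nvme_uring_cmd_checks() above rejects rings without them. */
static int nvme_uring_read(const char *chardev, void *buf, uint32_t len)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct nvme_uring_cmd *cmd;
	int fd, ret;

	fd = open(chardev, O_RDONLY);		/* e.g. the ns char device */
	if (fd < 0)
		return -1;

	ret = io_uring_queue_init(8, &ring,
				  IORING_SETUP_SQE128 | IORING_SETUP_CQE32);
	if (ret)
		return ret;

	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, 2 * sizeof(*sqe));	/* one 128-byte SQE slot */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = NVME_URING_CMD_IO;

	/* The NVMe command payload lives in the SQE's trailing cmd area. */
	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	cmd->opcode = 0x02;			/* NVMe Read */
	cmd->nsid = 1;				/* placeholder */
	cmd->addr = (uint64_t)(uintptr_t)buf;
	cmd->data_len = len;
	/* a real read also fills cdw10/11 (SLBA) and cdw12 (NLB) */

	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	ret = cqe->res;				/* 0 on success */
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	close(fd);
	return ret;
}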
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index d464fdf978fb..d3e2440d8abb 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -437,6 +437,7 @@ static const struct file_operations nvme_ns_head_chr_fops = {
.release = nvme_ns_head_chr_release,
.unlocked_ioctl = nvme_ns_head_chr_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+ .uring_cmd = nvme_ns_head_chr_uring_cmd,
};
static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1393bbf82d71..9b72b6ecf33c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -144,6 +144,11 @@ enum nvme_quirks {
* encoding the generation sequence number.
*/
NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
+
+ /*
+ * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
+ */
+ NVME_QUIRK_BOGUS_NID = (1 << 18),
};
/*
@@ -279,6 +284,7 @@ struct nvme_ctrl {
#endif
u16 crdt[3];
u16 oncs;
+ u32 dmrsl;
u16 oacs;
u16 sqsize;
u32 max_namespaces;
@@ -777,7 +783,12 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
+int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags);
+int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
+int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d817ca17463e..5a98a7de0964 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1439,6 +1439,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
nvme_init_request(abort_req, &cmd);
abort_req->end_io_data = NULL;
+ abort_req->rq_flags |= RQF_QUIET;
blk_execute_rq_nowait(abort_req, false, abort_endio);
/*
@@ -1775,6 +1776,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
if (IS_ERR(dev->ctrl.admin_q)) {
blk_mq_free_tag_set(&dev->admin_tagset);
+ dev->ctrl.admin_q = NULL;
return -ENOMEM;
}
if (!blk_get_queue(dev->ctrl.admin_q)) {
@@ -2486,6 +2488,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
req->end_io_data = nvmeq;
init_completion(&nvmeq->delete_done);
+ req->rq_flags |= RQF_QUIET;
blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
nvme_del_cq_end : nvme_del_queue_end);
return 0;
@@ -2675,7 +2678,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
struct pci_dev *pdev = to_pci_dev(dev->dev);
mutex_lock(&dev->shutdown_lock);
- if (pci_is_enabled(pdev)) {
+ if (pci_device_is_present(pdev) && pci_is_enabled(pdev)) {
u32 csts = readl(dev->bar + NVME_REG_CSTS);
if (dev->ctrl.state == NVME_CTRL_LIVE ||
@@ -3409,7 +3412,10 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS |
- NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ NVME_QUIRK_DISABLE_WRITE_ZEROES |
+ NVME_QUIRK_BOGUS_NID, },
+ { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
@@ -3447,6 +3453,10 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index d9f19d901313..b87c8ae41d9b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2010,10 +2010,7 @@ static void nvme_rdma_complete_timed_out(struct request *rq)
struct nvme_rdma_queue *queue = req->queue;
nvme_rdma_stop_queue(queue);
- if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
- nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
- blk_mq_complete_request(rq);
- }
+ nvmf_complete_timed_out_request(rq);
}
static enum blk_eh_timer_return
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index ad3a2bf2f1e9..bb67538d241b 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2318,10 +2318,7 @@ static void nvme_tcp_complete_timed_out(struct request *rq)
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
- if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
- nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
- blk_mq_complete_request(rq);
- }
+ nvmf_complete_timed_out_request(rq);
}
static enum blk_eh_timer_return
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index d886c2c59554..27a72504d31c 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -360,7 +360,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
ret = __blkdev_issue_discard(ns->bdev,
nvmet_lba_to_sect(ns, range->slba),
le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
- GFP_KERNEL, 0, bio);
+ GFP_KERNEL, bio);
if (ret && ret != -EOPNOTSUPP) {
req->error_slba = le64_to_cpu(range->slba);
return errno_to_nvme_status(req, ret);
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index e34718b09550..82b61acf7a72 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -34,8 +34,7 @@ static int validate_conv_zones_cb(struct blk_zone *z,
bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
{
- struct request_queue *q = ns->bdev->bd_disk->queue;
- u8 zasl = nvmet_zasl(queue_max_zone_append_sectors(q));
+ u8 zasl = nvmet_zasl(bdev_max_zone_append_sectors(ns->bdev));
struct gendisk *bd_disk = ns->bdev->bd_disk;
int ret;
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 6ab90891801d..816028c0f6ed 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1550,6 +1550,11 @@ static const struct qcom_pcie_cfg sc7280_cfg = {
.pipe_clk_need_muxing = true,
};
+static const struct qcom_pcie_cfg sc8180x_cfg = {
+ .ops = &ops_1_9_0,
+ .has_tbu_clk = true,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
.start_link = qcom_pcie_start_link,
@@ -1656,7 +1661,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg },
{ .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg },
{ .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg },
- { .compatible = "qcom,pcie-sc8180x", .data = &sm8250_cfg },
+ { .compatible = "qcom,pcie-sc8180x", .data = &sc8180x_cfg },
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },
{ .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 09d9bf465d72..ffec82c8a523 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -272,7 +272,6 @@ struct advk_pcie {
u32 actions;
} wins[OB_WIN_COUNT];
u8 wins_count;
- int irq;
struct irq_domain *rp_irq_domain;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
@@ -1570,26 +1569,21 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
}
}
-static void advk_pcie_irq_handler(struct irq_desc *desc)
+static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
{
- struct advk_pcie *pcie = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- u32 val, mask, status;
+ struct advk_pcie *pcie = arg;
+ u32 status;
- chained_irq_enter(chip, desc);
+ status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
+ if (!(status & PCIE_IRQ_CORE_INT))
+ return IRQ_NONE;
- val = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
- mask = advk_readl(pcie, HOST_CTRL_INT_MASK_REG);
- status = val & ((~mask) & PCIE_IRQ_ALL_MASK);
+ advk_pcie_handle_int(pcie);
- if (status & PCIE_IRQ_CORE_INT) {
- advk_pcie_handle_int(pcie);
+ /* Clear interrupt */
+ advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
- /* Clear interrupt */
- advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
- }
-
- chained_irq_exit(chip, desc);
+ return IRQ_HANDLED;
}
static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -1669,7 +1663,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct advk_pcie *pcie;
struct pci_host_bridge *bridge;
struct resource_entry *entry;
- int ret;
+ int ret, irq;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
if (!bridge)
@@ -1755,9 +1749,17 @@ static int advk_pcie_probe(struct platform_device *pdev)
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- pcie->irq = platform_get_irq(pdev, 0);
- if (pcie->irq < 0)
- return pcie->irq;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
+ IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
+ pcie);
+ if (ret) {
+ dev_err(dev, "Failed to register interrupt\n");
+ return ret;
+ }
pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
"reset-gpios", 0,
@@ -1814,15 +1816,12 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
- irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie);
-
bridge->sysdata = pcie;
bridge->ops = &advk_pcie_ops;
bridge->map_irq = advk_pcie_map_irq;
ret = pci_host_probe(bridge);
if (ret < 0) {
- irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
advk_pcie_remove_rp_irq_domain(pcie);
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
@@ -1871,9 +1870,6 @@ static int advk_pcie_remove(struct platform_device *pdev)
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
- /* Remove IRQ handler */
- irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
-
/* Remove IRQ domains */
advk_pcie_remove_rp_irq_domain(pcie);
advk_pcie_remove_msi_irq_domain(pcie);
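Converting from a chained handler to devm_request_irq() with IRQF_SHARED means the handler must now decide whether the interrupt is its own and return IRQ_NONE otherwise, exactly as advk_pcie_irq_handler() does above. The general contract, with hypothetical helpers:

static irqreturn_t my_handler(int irq, void *arg)
{
	struct my_dev *dev = arg;

	if (!(my_read_status(dev) & MY_IRQ_BIT))
		return IRQ_NONE;	/* not ours: let other sharers run */

	my_handle_and_ack(dev);
	return IRQ_HANDLED;
}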
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 558b35aba610..d270a204324e 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -3407,6 +3407,15 @@ static int hv_pci_probe(struct hv_device *hdev,
hbus->bridge->domain_nr = dom;
#ifdef CONFIG_X86
hbus->sysdata.domain = dom;
+#elif defined(CONFIG_ARM64)
+ /*
+ * Set the PCI bus parent to be the corresponding VMbus
+ * device. Then the VMbus device will be assigned as the
+ * ACPI companion in pcibios_root_bridge_prepare() and
+ * pci_dma_configure() will propagate device coherence
+ * information to devices created on the bus.
+ */
+ hbus->sysdata.parent = hdev->device.parent;
#endif
hbus->hdev = hdev;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 9ecce435fb3f..d25122fbe98a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2920,6 +2920,16 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
},
+ /*
+ * Downstream device is not accessible after putting a root port
+ * into D3cold and back into D0 on Elo i2.
+ */
+ .ident = "Elo i2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
+ },
},
#endif
{ }
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index afdcb91601d2..1e2d69453771 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -187,7 +187,7 @@ source "drivers/perf/hisilicon/Kconfig"
config MARVELL_CN10K_DDR_PMU
tristate "Enable MARVELL CN10K DRAM Subsystem(DSS) PMU Support"
- depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT)
help
Enable perf support for Marvell DDR Performance monitoring
event on CN10K platform.
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 9694370651fa..59d3980b8ca2 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -400,6 +400,9 @@ validate_group(struct perf_event *event)
if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
+ if (event == leader)
+ return 0;
+
for_each_sibling_event(sibling, leader) {
if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL;
@@ -489,12 +492,7 @@ __hw_perf_event_init(struct perf_event *event)
local64_set(&hwc->period_left, hwc->sample_period);
}
- if (event->group_leader != event) {
- if (validate_group(event) != 0)
- return -EINVAL;
- }
-
- return 0;
+ return validate_group(event);
}
static int armpmu_event_init(struct perf_event *event)
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 94ebc1ecace7..b1b2a55de77f 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -29,7 +29,7 @@
#define CNTL_OVER_MASK 0xFFFFFFFE
#define CNTL_CSV_SHIFT 24
-#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT)
+#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT)
#define EVENT_CYCLES_ID 0
#define EVENT_CYCLES_COUNTER 0
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 7640491aab12..30234c261b05 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -736,7 +736,7 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
{
u64 mpidr;
int cpu_cluster_id;
- struct cluster_pmu *cluster = NULL;
+ struct cluster_pmu *cluster;
/*
* This assumes that the cluster_id is in MPIDR[aff1] for
@@ -758,10 +758,10 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
cluster->cluster_id);
cpumask_set_cpu(cpu, &cluster->cluster_cpus);
*per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
- break;
+ return cluster;
}
- return cluster;
+ return NULL;
}
static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
index 5b471ab80fe2..54d65a6f0fcc 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
@@ -414,19 +414,19 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev)
ret = clk_prepare_enable(priv->clk_ref);
if (ret)
- goto err_disable_clk_ref;
+ return ret;
priv->reset = devm_reset_control_array_get_exclusive(dev);
- if (IS_ERR(priv->reset))
- return PTR_ERR(priv->reset);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ goto err_disable_clk_ref;
+ }
priv->phy = devm_phy_create(dev, np, &phy_g12a_usb3_pcie_ops);
if (IS_ERR(priv->phy)) {
ret = PTR_ERR(priv->phy);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to create PHY\n");
-
- return ret;
+ dev_err_probe(dev, ret, "failed to create PHY\n");
+ goto err_disable_clk_ref;
}
phy_set_drvdata(priv->phy, priv);
@@ -434,8 +434,12 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev)
phy_provider = devm_of_phy_provider_register(dev,
phy_g12a_usb3_pcie_xlate);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ goto err_disable_clk_ref;
+ }
- return PTR_ERR_OR_ZERO(phy_provider);
+ return 0;
err_disable_clk_ref:
clk_disable_unprepare(priv->clk_ref);
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 5172971f4c36..3cd4d51c247c 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -629,7 +629,8 @@ idle:
cleanup:
if (error < 0)
phy_mdm6600_device_power_off(ddata);
-
+ pm_runtime_disable(ddata->dev);
+ pm_runtime_dont_use_autosuspend(ddata->dev);
return error;
}
diff --git a/drivers/phy/samsung/phy-exynos5250-sata.c b/drivers/phy/samsung/phy-exynos5250-sata.c
index 9ec234243f7c..595adba5fb8f 100644
--- a/drivers/phy/samsung/phy-exynos5250-sata.c
+++ b/drivers/phy/samsung/phy-exynos5250-sata.c
@@ -187,6 +187,7 @@ static int exynos_sata_phy_probe(struct platform_device *pdev)
return -EINVAL;
sata_phy->client = of_find_i2c_device_by_node(node);
+ of_node_put(node);
if (!sata_phy->client)
return -EPROBE_DEFER;
@@ -195,20 +196,21 @@ static int exynos_sata_phy_probe(struct platform_device *pdev)
sata_phy->phyclk = devm_clk_get(dev, "sata_phyctrl");
if (IS_ERR(sata_phy->phyclk)) {
dev_err(dev, "failed to get clk for PHY\n");
- return PTR_ERR(sata_phy->phyclk);
+ ret = PTR_ERR(sata_phy->phyclk);
+ goto put_dev;
}
ret = clk_prepare_enable(sata_phy->phyclk);
if (ret < 0) {
dev_err(dev, "failed to enable source clk\n");
- return ret;
+ goto put_dev;
}
sata_phy->phy = devm_phy_create(dev, NULL, &exynos_sata_phy_ops);
if (IS_ERR(sata_phy->phy)) {
- clk_disable_unprepare(sata_phy->phyclk);
dev_err(dev, "failed to create PHY\n");
- return PTR_ERR(sata_phy->phy);
+ ret = PTR_ERR(sata_phy->phy);
+ goto clk_disable;
}
phy_set_drvdata(sata_phy->phy, sata_phy);
@@ -216,11 +218,18 @@ static int exynos_sata_phy_probe(struct platform_device *pdev)
phy_provider = devm_of_phy_provider_register(dev,
of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
- clk_disable_unprepare(sata_phy->phyclk);
- return PTR_ERR(phy_provider);
+ ret = PTR_ERR(phy_provider);
+ goto clk_disable;
}
return 0;
+
+clk_disable:
+ clk_disable_unprepare(sata_phy->phyclk);
+put_dev:
+ put_device(&sata_phy->client->dev);
+
+ return ret;
}
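The exynos and tusb1210 fixups in this patch restore the canonical goto-unwind shape: resources are released in reverse order of acquisition, and every failure past a given step jumps to the label that undoes exactly the steps already taken. In outline:

ret = acquire_a();
if (ret)
	return ret;
ret = acquire_b();
if (ret)
	goto undo_a;
ret = acquire_c();
if (ret)
	goto undo_b;
return 0;

undo_b:
release_b();
undo_a:
release_a();
return ret;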
static const struct of_device_id exynos_sata_phy_of_match[] = {
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index c1211c4f863c..0be727bb9f79 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -838,7 +838,7 @@ static int serdes_am654_probe(struct platform_device *pdev)
clk_err:
of_clk_del_provider(node);
-
+ pm_runtime_disable(dev);
return ret;
}
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index 3a505fe5715a..31a775877f6e 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -215,7 +215,7 @@ static int omap_usb2_enable_clocks(struct omap_usb *phy)
return 0;
err1:
- clk_disable(phy->wkupclk);
+ clk_disable_unprepare(phy->wkupclk);
err0:
return ret;
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index 2cbc91e535d4..f502c36f3be5 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -696,6 +696,7 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
}
control_pdev = of_find_device_by_node(control_node);
+ of_node_put(control_node);
if (!control_pdev) {
dev_err(dev, "Failed to get control device\n");
return -EINVAL;
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index a0cdbcadf09e..c3ab4b69ea68 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -155,7 +155,7 @@ static int tusb1210_set_mode(struct phy *phy, enum phy_mode mode, int submode)
}
#ifdef CONFIG_POWER_SUPPLY
-const char * const tusb1210_chg_det_states[] = {
+static const char * const tusb1210_chg_det_states[] = {
"CHG_DET_CONNECTING",
"CHG_DET_START_DET",
"CHG_DET_READ_DET",
@@ -537,12 +537,18 @@ static int tusb1210_probe(struct ulpi *ulpi)
tusb1210_probe_charger_detect(tusb);
tusb->phy = ulpi_phy_create(ulpi, &phy_ops);
- if (IS_ERR(tusb->phy))
- return PTR_ERR(tusb->phy);
+ if (IS_ERR(tusb->phy)) {
+ ret = PTR_ERR(tusb->phy);
+ goto err_remove_charger;
+ }
phy_set_drvdata(tusb->phy, tusb);
ulpi_set_drvdata(ulpi, tusb);
return 0;
+
+err_remove_charger:
+ tusb1210_remove_charger_detect(tusb);
+ return ret;
}
static void tusb1210_remove(struct ulpi *ulpi)
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index a3fa03bcd9a3..80838dc54b3a 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -1236,18 +1236,17 @@ FUNC_GROUP_DECL(SALT8, AA12);
FUNC_GROUP_DECL(WDTRST4, AA12);
#define AE12 196
-SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID,
- SIG_DESC_SET(SCU438, 4));
+SIG_EXPR_LIST_DECL_SESG(AE12, FWSPIQ2, FWQSPI, SIG_DESC_SET(SCU438, 4));
SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4);
-PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2),
+PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIQ2),
SIG_EXPR_LIST_PTR(AE12, GPIOY4));
#define AF12 197
-SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID,
- SIG_DESC_SET(SCU438, 5));
+SIG_EXPR_LIST_DECL_SESG(AF12, FWSPIQ3, FWQSPI, SIG_DESC_SET(SCU438, 5));
SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5);
-PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3),
+PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIQ3),
SIG_EXPR_LIST_PTR(AF12, GPIOY5));
+FUNC_GROUP_DECL(FWQSPI, AE12, AF12);
#define AC12 198
SSSF_PIN_DECL(AC12, GPIOY6, FWSPIABR, SIG_DESC_SET(SCU438, 6));
@@ -1520,9 +1519,8 @@ SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
-GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
-FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
+FUNC_DECL_1(FWSPID, FWSPID);
FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
/*
@@ -1918,7 +1916,7 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(FSI2),
ASPEED_PINCTRL_GROUP(FWSPIABR),
ASPEED_PINCTRL_GROUP(FWSPID),
- ASPEED_PINCTRL_GROUP(FWQSPID),
+ ASPEED_PINCTRL_GROUP(FWQSPI),
ASPEED_PINCTRL_GROUP(FWSPIWP),
ASPEED_PINCTRL_GROUP(GPIT0),
ASPEED_PINCTRL_GROUP(GPIT1),
@@ -2160,6 +2158,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(FSI2),
ASPEED_PINCTRL_FUNC(FWSPIABR),
ASPEED_PINCTRL_FUNC(FWSPID),
+ ASPEED_PINCTRL_FUNC(FWQSPI),
ASPEED_PINCTRL_FUNC(FWSPIWP),
ASPEED_PINCTRL_FUNC(GPIT0),
ASPEED_PINCTRL_FUNC(GPIT1),
diff --git a/drivers/pinctrl/intel/pinctrl-alderlake.c b/drivers/pinctrl/intel/pinctrl-alderlake.c
index 32ba50efbceb..62dbd1e67513 100644
--- a/drivers/pinctrl/intel/pinctrl-alderlake.c
+++ b/drivers/pinctrl/intel/pinctrl-alderlake.c
@@ -14,11 +14,17 @@
#include "pinctrl-intel.h"
-#define ADL_PAD_OWN 0x0a0
-#define ADL_PADCFGLOCK 0x110
-#define ADL_HOSTSW_OWN 0x150
-#define ADL_GPI_IS 0x200
-#define ADL_GPI_IE 0x220
+#define ADL_N_PAD_OWN 0x020
+#define ADL_N_PADCFGLOCK 0x080
+#define ADL_N_HOSTSW_OWN 0x0b0
+#define ADL_N_GPI_IS 0x100
+#define ADL_N_GPI_IE 0x120
+
+#define ADL_S_PAD_OWN 0x0a0
+#define ADL_S_PADCFGLOCK 0x110
+#define ADL_S_HOSTSW_OWN 0x150
+#define ADL_S_GPI_IS 0x200
+#define ADL_S_GPI_IE 0x220
#define ADL_GPP(r, s, e, g) \
{ \
@@ -28,14 +34,28 @@
.gpio_base = (g), \
}
-#define ADL_COMMUNITY(b, s, e, g) \
+#define ADL_N_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = ADL_N_PAD_OWN, \
+ .padcfglock_offset = ADL_N_PADCFGLOCK, \
+ .hostown_offset = ADL_N_HOSTSW_OWN, \
+ .is_offset = ADL_N_GPI_IS, \
+ .ie_offset = ADL_N_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+#define ADL_S_COMMUNITY(b, s, e, g) \
{ \
.barno = (b), \
- .padown_offset = ADL_PAD_OWN, \
- .padcfglock_offset = ADL_PADCFGLOCK, \
- .hostown_offset = ADL_HOSTSW_OWN, \
- .is_offset = ADL_GPI_IS, \
- .ie_offset = ADL_GPI_IE, \
+ .padown_offset = ADL_S_PAD_OWN, \
+ .padcfglock_offset = ADL_S_PADCFGLOCK, \
+ .hostown_offset = ADL_S_HOSTSW_OWN, \
+ .is_offset = ADL_S_GPI_IS, \
+ .ie_offset = ADL_S_GPI_IE, \
.pin_base = (s), \
.npins = ((e) - (s) + 1), \
.gpps = (g), \
@@ -342,10 +362,10 @@ static const struct intel_padgroup adln_community5_gpps[] = {
};
static const struct intel_community adln_communities[] = {
- ADL_COMMUNITY(0, 0, 66, adln_community0_gpps),
- ADL_COMMUNITY(1, 67, 168, adln_community1_gpps),
- ADL_COMMUNITY(2, 169, 248, adln_community4_gpps),
- ADL_COMMUNITY(3, 249, 256, adln_community5_gpps),
+ ADL_N_COMMUNITY(0, 0, 66, adln_community0_gpps),
+ ADL_N_COMMUNITY(1, 67, 168, adln_community1_gpps),
+ ADL_N_COMMUNITY(2, 169, 248, adln_community4_gpps),
+ ADL_N_COMMUNITY(3, 249, 256, adln_community5_gpps),
};
static const struct intel_pinctrl_soc_data adln_soc_data = {
@@ -713,11 +733,11 @@ static const struct intel_padgroup adls_community5_gpps[] = {
};
static const struct intel_community adls_communities[] = {
- ADL_COMMUNITY(0, 0, 94, adls_community0_gpps),
- ADL_COMMUNITY(1, 95, 150, adls_community1_gpps),
- ADL_COMMUNITY(2, 151, 199, adls_community3_gpps),
- ADL_COMMUNITY(3, 200, 269, adls_community4_gpps),
- ADL_COMMUNITY(4, 270, 303, adls_community5_gpps),
+ ADL_S_COMMUNITY(0, 0, 94, adls_community0_gpps),
+ ADL_S_COMMUNITY(1, 95, 150, adls_community1_gpps),
+ ADL_S_COMMUNITY(2, 151, 199, adls_community3_gpps),
+ ADL_S_COMMUNITY(3, 200, 269, adls_community4_gpps),
+ ADL_S_COMMUNITY(4, 270, 303, adls_community5_gpps),
};
static const struct intel_pinctrl_soc_data adls_soc_data = {
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 8dca1ef04965..40accd110c3d 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -30,6 +30,7 @@ config PINCTRL_MTK_MOORE
select GENERIC_PINMUX_FUNCTIONS
select GPIOLIB
select OF_GPIO
+ select EINT_MTK
select PINCTRL_MTK_V2
config PINCTRL_MTK_PARIS
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8365.c b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
index 727c65221aef..57f37a294063 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8365.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
@@ -259,7 +259,7 @@ static const struct mtk_pin_ies_smt_set mt8365_ies_set[] = {
MTK_PIN_IES_SMT_SPEC(104, 104, 0x420, 13),
MTK_PIN_IES_SMT_SPEC(105, 109, 0x420, 14),
MTK_PIN_IES_SMT_SPEC(110, 113, 0x420, 15),
- MTK_PIN_IES_SMT_SPEC(114, 112, 0x420, 16),
+ MTK_PIN_IES_SMT_SPEC(114, 116, 0x420, 16),
MTK_PIN_IES_SMT_SPEC(117, 119, 0x420, 17),
MTK_PIN_IES_SMT_SPEC(120, 122, 0x420, 18),
MTK_PIN_IES_SMT_SPEC(123, 125, 0x420, 19),
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 1a7d686494ff..0645c2c24f50 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -387,6 +387,8 @@ static void amd_gpio_irq_enable(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ gpiochip_enable_irq(gc, d->hwirq);
+
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
@@ -408,6 +410,8 @@ static void amd_gpio_irq_disable(struct irq_data *d)
pin_reg &= ~BIT(INTERRUPT_MASK_OFF);
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+
+ gpiochip_disable_irq(gc, d->hwirq);
}
static void amd_gpio_irq_mask(struct irq_data *d)
@@ -577,7 +581,7 @@ static void amd_irq_ack(struct irq_data *d)
*/
}
-static struct irq_chip amd_gpio_irqchip = {
+static const struct irq_chip amd_gpio_irqchip = {
.name = "amd_gpio",
.irq_ack = amd_irq_ack,
.irq_enable = amd_gpio_irq_enable,
@@ -593,7 +597,8 @@ static struct irq_chip amd_gpio_irqchip = {
* the wake event. Otherwise the wake event will never clear and
* prevent the system from suspending.
*/
- .flags = IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND,
+ .flags = IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
#define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
@@ -1026,7 +1031,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
amd_gpio_irq_init(gpio_dev);
girq = &gpio_dev->gc.irq;
- girq->chip = &amd_gpio_irqchip;
+ gpio_irq_chip_set_chip(girq, &amd_gpio_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
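The hunks above follow the immutable-irqchip conversion pattern: the irq_chip becomes const, the driver takes over the gpiochip IRQ bookkeeping in its own enable/disable (or mask/unmask) paths, and registration goes through gpio_irq_chip_set_chip() instead of assigning girq->chip. A minimal sketch of that pattern, with hypothetical my_* names:

	static void my_gpio_irq_mask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		/* hardware-specific masking would go here */
		gpiochip_disable_irq(gc, irqd_to_hwirq(d));
	}

	static const struct irq_chip my_gpio_irqchip = {
		.name = "my_gpio",
		.irq_mask = my_gpio_irq_mask,
		.flags = IRQCHIP_IMMUTABLE,
		GPIOCHIP_IRQ_RESOURCE_HELPERS,	/* reqres/relres helpers */
	};

	/* at probe time: */
	gpio_irq_chip_set_chip(&gc->irq, &my_gpio_irqchip);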
diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c
index 72f4dd2466e1..5e610849dfc3 100644
--- a/drivers/pinctrl/pinctrl-apple-gpio.c
+++ b/drivers/pinctrl/pinctrl-apple-gpio.c
@@ -36,7 +36,6 @@ struct apple_gpio_pinctrl {
struct pinctrl_desc pinctrl_desc;
struct gpio_chip gpio_chip;
- struct irq_chip irq_chip;
u8 irqgrps[];
};
@@ -275,17 +274,21 @@ static unsigned int apple_gpio_irq_type(unsigned int type)
static void apple_gpio_irq_mask(struct irq_data *data)
{
- struct apple_gpio_pinctrl *pctl = gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct apple_gpio_pinctrl *pctl = gpiochip_get_data(gc);
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
FIELD_PREP(REG_GPIOx_MODE, REG_GPIOx_IN_IRQ_OFF));
+ gpiochip_disable_irq(gc, data->hwirq);
}
static void apple_gpio_irq_unmask(struct irq_data *data)
{
- struct apple_gpio_pinctrl *pctl = gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct apple_gpio_pinctrl *pctl = gpiochip_get_data(gc);
unsigned int irqtype = apple_gpio_irq_type(irqd_get_trigger_type(data));
+ gpiochip_enable_irq(gc, data->hwirq);
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
FIELD_PREP(REG_GPIOx_MODE, irqtype));
}
@@ -343,13 +346,15 @@ static void apple_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static struct irq_chip apple_gpio_irqchip = {
- .name = "Apple-GPIO",
- .irq_startup = apple_gpio_irq_startup,
- .irq_ack = apple_gpio_irq_ack,
- .irq_mask = apple_gpio_irq_mask,
- .irq_unmask = apple_gpio_irq_unmask,
- .irq_set_type = apple_gpio_irq_set_type,
+static const struct irq_chip apple_gpio_irqchip = {
+ .name = "Apple-GPIO",
+ .irq_startup = apple_gpio_irq_startup,
+ .irq_ack = apple_gpio_irq_ack,
+ .irq_mask = apple_gpio_irq_mask,
+ .irq_unmask = apple_gpio_irq_unmask,
+ .irq_set_type = apple_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
/* Probe & register */
@@ -360,8 +365,6 @@ static int apple_gpio_register(struct apple_gpio_pinctrl *pctl)
void **irq_data = NULL;
int ret;
- pctl->irq_chip = apple_gpio_irqchip;
-
pctl->gpio_chip.label = dev_name(pctl->dev);
pctl->gpio_chip.request = gpiochip_generic_request;
pctl->gpio_chip.free = gpiochip_generic_free;
@@ -377,7 +380,7 @@ static int apple_gpio_register(struct apple_gpio_pinctrl *pctl)
if (girq->num_parents) {
int i;
- girq->chip = &pctl->irq_chip;
+ gpio_irq_chip_set_chip(girq, &apple_gpio_irqchip);
girq->parent_handler = apple_gpio_irq_handler;
girq->parents = kmalloc_array(girq->num_parents,
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 003fb0e34153..6a956ee94494 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -129,6 +129,7 @@ enum {
FUNC_PTP1,
FUNC_PTP2,
FUNC_PTP3,
+ FUNC_PTPSYNC_0,
FUNC_PTPSYNC_1,
FUNC_PTPSYNC_2,
FUNC_PTPSYNC_3,
@@ -252,6 +253,7 @@ static const char *const ocelot_function_names[] = {
[FUNC_PTP1] = "ptp1",
[FUNC_PTP2] = "ptp2",
[FUNC_PTP3] = "ptp3",
+ [FUNC_PTPSYNC_0] = "ptpsync_0",
[FUNC_PTPSYNC_1] = "ptpsync_1",
[FUNC_PTPSYNC_2] = "ptpsync_2",
[FUNC_PTPSYNC_3] = "ptpsync_3",
@@ -983,7 +985,7 @@ LAN966X_P(31, GPIO, FC3_c, CAN1, NONE, OB_TRG, RECO_b, NON
LAN966X_P(32, GPIO, FC3_c, NONE, SGPIO_a, NONE, MIIM_Sa, NONE, R);
LAN966X_P(33, GPIO, FC1_b, NONE, SGPIO_a, NONE, MIIM_Sa, MIIM_b, R);
LAN966X_P(34, GPIO, FC1_b, NONE, SGPIO_a, NONE, MIIM_Sa, MIIM_b, R);
-LAN966X_P(35, GPIO, FC1_b, NONE, SGPIO_a, CAN0_b, NONE, NONE, R);
+LAN966X_P(35, GPIO, FC1_b, PTPSYNC_0, SGPIO_a, CAN0_b, NONE, NONE, R);
LAN966X_P(36, GPIO, NONE, PTPSYNC_1, NONE, CAN0_b, NONE, NONE, R);
LAN966X_P(37, GPIO, FC_SHRD0, PTPSYNC_2, TWI_SLC_GATE_AD, NONE, NONE, NONE, R);
LAN966X_P(38, GPIO, NONE, PTPSYNC_3, NONE, NONE, NONE, NONE, R);
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 8d271c6b0ca4..5de691c630b4 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1374,10 +1374,10 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
}
irq = irq_of_parse_and_map(child, 0);
- if (irq < 0) {
- dev_err(pctl->dev, "No IRQ for bank %u: %d\n", i, irq);
+ if (!irq) {
+ dev_err(pctl->dev, "No IRQ for bank %u\n", i);
of_node_put(child);
- ret = irq;
+ ret = -EINVAL;
goto err;
}
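The pistachio fix works because irq_of_parse_and_map() returns an unsigned virq number, with 0 meaning "no mapping"; it never returns a negative value, so the old `irq < 0` test was dead code. The idiomatic check is:

	unsigned int virq = irq_of_parse_and_map(np, 0);

	if (!virq)	/* 0 is the failure value, not a negative errno */
		return -EINVAL;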
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index a1b598b86aa9..2cb79e649fcf 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -457,95 +457,110 @@ static struct rockchip_mux_recalced_data rk3128_mux_recalced_data[] = {
static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
{
+ /* gpio1b6_sel */
.num = 1,
.pin = 14,
.reg = 0x28,
.bit = 12,
.mask = 0xf
}, {
+ /* gpio1b7_sel */
.num = 1,
.pin = 15,
.reg = 0x2c,
.bit = 0,
.mask = 0x3
}, {
+ /* gpio1c2_sel */
.num = 1,
.pin = 18,
.reg = 0x30,
.bit = 4,
.mask = 0xf
}, {
+ /* gpio1c3_sel */
.num = 1,
.pin = 19,
.reg = 0x30,
.bit = 8,
.mask = 0xf
}, {
+ /* gpio1c4_sel */
.num = 1,
.pin = 20,
.reg = 0x30,
.bit = 12,
.mask = 0xf
}, {
+ /* gpio1c5_sel */
.num = 1,
.pin = 21,
.reg = 0x34,
.bit = 0,
.mask = 0xf
}, {
+ /* gpio1c6_sel */
.num = 1,
.pin = 22,
.reg = 0x34,
.bit = 4,
.mask = 0xf
}, {
+ /* gpio1c7_sel */
.num = 1,
.pin = 23,
.reg = 0x34,
.bit = 8,
.mask = 0xf
}, {
- .num = 3,
- .pin = 12,
- .reg = 0x68,
- .bit = 8,
- .mask = 0xf
- }, {
- .num = 3,
- .pin = 13,
- .reg = 0x68,
- .bit = 12,
- .mask = 0xf
- }, {
+ /* gpio2a2_sel */
.num = 2,
.pin = 2,
- .reg = 0x608,
- .bit = 0,
- .mask = 0x7
+ .reg = 0x40,
+ .bit = 4,
+ .mask = 0x3
}, {
+ /* gpio2a3_sel */
.num = 2,
.pin = 3,
- .reg = 0x608,
- .bit = 4,
- .mask = 0x7
+ .reg = 0x40,
+ .bit = 6,
+ .mask = 0x3
}, {
+ /* gpio2c0_sel */
.num = 2,
.pin = 16,
- .reg = 0x610,
- .bit = 8,
- .mask = 0x7
+ .reg = 0x50,
+ .bit = 0,
+ .mask = 0x3
}, {
+ /* gpio3b2_sel */
.num = 3,
.pin = 10,
- .reg = 0x610,
- .bit = 0,
- .mask = 0x7
+ .reg = 0x68,
+ .bit = 4,
+ .mask = 0x3
}, {
+ /* gpio3b3_sel */
.num = 3,
.pin = 11,
- .reg = 0x610,
- .bit = 4,
- .mask = 0x7
+ .reg = 0x68,
+ .bit = 6,
+ .mask = 0x3
+ }, {
+ /* gpio3b4_sel */
+ .num = 3,
+ .pin = 12,
+ .reg = 0x68,
+ .bit = 8,
+ .mask = 0xf
+ }, {
+ /* gpio3b5_sel */
+ .num = 3,
+ .pin = 13,
+ .reg = 0x68,
+ .bit = 12,
+ .mask = 0xf
},
};
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 966ea6622ff3..a2abfe987ab1 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -42,7 +42,6 @@
* @chip: gpiochip handle.
* @desc: pin controller descriptor
* @restart_nb: restart notifier block.
- * @irq_chip: irq chip information
* @irq: parent irq for the TLMM irq_chip.
* @intr_target_use_scm: route irq to application cpu using scm calls
* @lock: Spinlock to protect register resources as well
@@ -63,7 +62,6 @@ struct msm_pinctrl {
struct pinctrl_desc desc;
struct notifier_block restart_nb;
- struct irq_chip irq_chip;
int irq;
bool intr_target_use_scm;
@@ -868,6 +866,8 @@ static void msm_gpio_irq_enable(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+ gpiochip_enable_irq(gc, d->hwirq);
+
if (d->parent_data)
irq_chip_enable_parent(d);
@@ -885,6 +885,8 @@ static void msm_gpio_irq_disable(struct irq_data *d)
if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
msm_gpio_irq_mask(d);
+
+ gpiochip_disable_irq(gc, d->hwirq);
}
/**
@@ -958,6 +960,14 @@ static void msm_gpio_irq_ack(struct irq_data *d)
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
+static void msm_gpio_irq_eoi(struct irq_data *d)
+{
+ d = d->parent_data;
+
+ if (d)
+ d->chip->irq_eoi(d);
+}
+
static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d,
unsigned int type)
{
@@ -1255,6 +1265,26 @@ static bool msm_gpio_needs_valid_mask(struct msm_pinctrl *pctrl)
return device_property_count_u16(pctrl->dev, "gpios") > 0;
}
+static const struct irq_chip msm_gpio_irq_chip = {
+ .name = "msmgpio",
+ .irq_enable = msm_gpio_irq_enable,
+ .irq_disable = msm_gpio_irq_disable,
+ .irq_mask = msm_gpio_irq_mask,
+ .irq_unmask = msm_gpio_irq_unmask,
+ .irq_ack = msm_gpio_irq_ack,
+ .irq_eoi = msm_gpio_irq_eoi,
+ .irq_set_type = msm_gpio_irq_set_type,
+ .irq_set_wake = msm_gpio_irq_set_wake,
+ .irq_request_resources = msm_gpio_irq_reqres,
+ .irq_release_resources = msm_gpio_irq_relres,
+ .irq_set_affinity = msm_gpio_irq_set_affinity,
+ .irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity,
+ .flags = (IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND |
+ IRQCHIP_IMMUTABLE),
+};
+
static int msm_gpio_init(struct msm_pinctrl *pctrl)
{
struct gpio_chip *chip;
@@ -1276,22 +1306,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
if (msm_gpio_needs_valid_mask(pctrl))
chip->init_valid_mask = msm_gpio_init_valid_mask;
- pctrl->irq_chip.name = "msmgpio";
- pctrl->irq_chip.irq_enable = msm_gpio_irq_enable;
- pctrl->irq_chip.irq_disable = msm_gpio_irq_disable;
- pctrl->irq_chip.irq_mask = msm_gpio_irq_mask;
- pctrl->irq_chip.irq_unmask = msm_gpio_irq_unmask;
- pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
- pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
- pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
- pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres;
- pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
- pctrl->irq_chip.irq_set_affinity = msm_gpio_irq_set_affinity;
- pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity;
- pctrl->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND |
- IRQCHIP_SET_TYPE_MASKED |
- IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND;
-
np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
if (np) {
chip->irq.parent_domain = irq_find_matching_host(np,
@@ -1300,7 +1314,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
if (!chip->irq.parent_domain)
return -EPROBE_DEFER;
chip->irq.child_to_parent_hwirq = msm_gpio_wakeirq;
- pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent;
/*
* Let's skip handling the GPIOs, if the parent irqchip
* is handling the direct connect IRQ of the GPIO.
@@ -1313,7 +1326,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
}
girq = &chip->irq;
- girq->chip = &pctrl->irq_chip;
+ gpio_irq_chip_set_chip(girq, &msm_gpio_irq_chip);
girq->parent_handler = msm_gpio_irq_handler;
girq->fwnode = pctrl->dev->fwnode;
girq->num_parents = 1;
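Because the msm irq_chip is now a shared const structure, the driver can no longer plug in irq_chip_eoi_parent only when a wakeup parent exists; msm_gpio_irq_eoi() instead guards the parent walk itself. The general shape of that guarded forwarding, as a sketch:

	static void my_irq_eoi(struct irq_data *d)
	{
		d = d->parent_data;	/* NULL when no parent domain */
		if (d)
			d->chip->irq_eoi(d);
	}

Calling irq_chip_eoi_parent() unconditionally would dereference a NULL parent on boards without a wakeup-parent domain.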
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6350.c b/drivers/pinctrl/qcom/pinctrl-sm6350.c
index 4d37b817b232..a91a86628f2f 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6350.c
@@ -264,14 +264,14 @@ static const struct pinctrl_pin_desc sm6350_pins[] = {
PINCTRL_PIN(153, "GPIO_153"),
PINCTRL_PIN(154, "GPIO_154"),
PINCTRL_PIN(155, "GPIO_155"),
- PINCTRL_PIN(156, "SDC1_RCLK"),
- PINCTRL_PIN(157, "SDC1_CLK"),
- PINCTRL_PIN(158, "SDC1_CMD"),
- PINCTRL_PIN(159, "SDC1_DATA"),
- PINCTRL_PIN(160, "SDC2_CLK"),
- PINCTRL_PIN(161, "SDC2_CMD"),
- PINCTRL_PIN(162, "SDC2_DATA"),
- PINCTRL_PIN(163, "UFS_RESET"),
+ PINCTRL_PIN(156, "UFS_RESET"),
+ PINCTRL_PIN(157, "SDC1_RCLK"),
+ PINCTRL_PIN(158, "SDC1_CLK"),
+ PINCTRL_PIN(159, "SDC1_CMD"),
+ PINCTRL_PIN(160, "SDC1_DATA"),
+ PINCTRL_PIN(161, "SDC2_CLK"),
+ PINCTRL_PIN(162, "SDC2_CMD"),
+ PINCTRL_PIN(163, "SDC2_DATA"),
};
#define DECLARE_MSM_GPIO_PINS(pin) \
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index dfd805e76862..7b0576f71376 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -4,14 +4,13 @@
#
config PINCTRL_SAMSUNG
bool
- depends on OF_GPIO
+ select GPIOLIB
select PINMUX
select PINCONF
config PINCTRL_EXYNOS
bool "Pinctrl common driver part for Samsung Exynos SoCs"
- depends on OF_GPIO
- depends on ARCH_EXYNOS || ARCH_S5PV210 || COMPILE_TEST
+ depends on ARCH_EXYNOS || ARCH_S5PV210 || (COMPILE_TEST && OF)
select PINCTRL_SAMSUNG
select PINCTRL_EXYNOS_ARM if ARM && (ARCH_EXYNOS || ARCH_S5PV210)
select PINCTRL_EXYNOS_ARM64 if ARM64 && ARCH_EXYNOS
@@ -26,12 +25,10 @@ config PINCTRL_EXYNOS_ARM64
config PINCTRL_S3C24XX
bool "Samsung S3C24XX SoC pinctrl driver"
- depends on OF_GPIO
- depends on ARCH_S3C24XX || COMPILE_TEST
+ depends on ARCH_S3C24XX || (COMPILE_TEST && OF)
select PINCTRL_SAMSUNG
config PINCTRL_S3C64XX
bool "Samsung S3C64XX SoC pinctrl driver"
- depends on OF_GPIO
- depends on ARCH_S3C64XX || COMPILE_TEST
+ depends on ARCH_S3C64XX || (COMPILE_TEST && OF)
select PINCTRL_SAMSUNG
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index d291819c2f77..cb965cf93705 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -770,7 +770,7 @@ static const struct samsung_pin_bank_data fsd_pin_banks2[] __initconst = {
EXYNOS850_PIN_BANK_EINTN(3, 0x00, "gpq0"),
};
-const struct samsung_pin_ctrl fsd_pin_ctrl[] __initconst = {
+static const struct samsung_pin_ctrl fsd_pin_ctrl[] __initconst = {
{
/* pin-controller instance 0 FSYS0 data */
.pin_banks = fsd_pin_banks0,
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 9ed764731570..f7c9459f6628 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -225,6 +225,13 @@ static void stm32_gpio_free(struct gpio_chip *chip, unsigned offset)
pinctrl_gpio_free(chip->base + offset);
}
+static int stm32_gpio_get_noclk(struct gpio_chip *chip, unsigned int offset)
+{
+ struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+
+ return !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
+}
+
static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
@@ -232,7 +239,7 @@ static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
clk_enable(bank->clk);
- ret = !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
+ ret = stm32_gpio_get_noclk(chip, offset);
clk_disable(bank->clk);
@@ -311,8 +318,12 @@ static void stm32_gpio_irq_trigger(struct irq_data *d)
struct stm32_gpio_bank *bank = d->domain->host_data;
int level;
+ /* Do not access the GPIO if this is not a level-triggered IRQ. */
+ if (!(bank->irq_type[d->hwirq] & IRQ_TYPE_LEVEL_MASK))
+ return;
+
/* If level interrupt type then retrig */
- level = stm32_gpio_get(&bank->gpio_chip, d->hwirq);
+ level = stm32_gpio_get_noclk(&bank->gpio_chip, d->hwirq);
if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
(level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
irq_chip_retrigger_hierarchy(d);
@@ -354,6 +365,7 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
{
struct stm32_gpio_bank *bank = irq_data->domain->host_data;
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
+ unsigned long flags;
int ret;
ret = stm32_gpio_direction_input(&bank->gpio_chip, irq_data->hwirq);
@@ -367,6 +379,10 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
return ret;
}
+ flags = irqd_get_trigger_type(irq_data);
+ if (flags & IRQ_TYPE_LEVEL_MASK)
+ clk_enable(bank->clk);
+
return 0;
}
@@ -374,6 +390,9 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data)
{
struct stm32_gpio_bank *bank = irq_data->domain->host_data;
+ if (bank->irq_type[irq_data->hwirq] & IRQ_TYPE_LEVEL_MASK)
+ clk_disable(bank->clk);
+
gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
}
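The stm32 change keeps the bank clock running for the whole lifetime of a level-triggered line, because the retrigger path now reads IDR through a _noclk helper that assumes the clock is already on; edge-triggered lines keep the old clock-gated fast path. The two resource hooks pair up like this (sketch; the bank lookup is omitted):

	static int my_irq_request_resources(struct irq_data *d)
	{
		if (irqd_get_trigger_type(d) & IRQ_TYPE_LEVEL_MASK)
			clk_enable(bank->clk);	/* balanced in release */
		return 0;
	}

	static void my_irq_release_resources(struct irq_data *d)
	{
		if (bank->irq_type[d->hwirq] & IRQ_TYPE_LEVEL_MASK)
			clk_disable(bank->clk);
	}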
diff --git a/drivers/pinctrl/sunplus/sppctl_sp7021.c b/drivers/pinctrl/sunplus/sppctl_sp7021.c
index 9748345b9298..cd657760a644 100644
--- a/drivers/pinctrl/sunplus/sppctl_sp7021.c
+++ b/drivers/pinctrl/sunplus/sppctl_sp7021.c
@@ -419,7 +419,15 @@ static const struct sppctl_grp sp7021grps_prbp[] = {
EGRP("PROBE_PORT2", 2, pins_prp2),
};
+/*
+ * For compatibility reasons, the first valid item must start at the third
+ * position of the array, so keep the first two entries of the table unused
+ * (dummy).
+ */
const struct sppctl_func sppctl_list_funcs[] = {
+ FNCN("", pinmux_type_fpmx, 0x00, 0, 0),
+ FNCN("", pinmux_type_fpmx, 0x00, 0, 0),
+
FNCN("L2SW_CLK_OUT", pinmux_type_fpmx, 0x00, 0, 7),
FNCN("L2SW_MAC_SMI_MDC", pinmux_type_fpmx, 0x00, 8, 7),
FNCN("L2SW_LED_FLASH0", pinmux_type_fpmx, 0x01, 0, 7),
diff --git a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
index 2801ca706273..b8fc88a23cf4 100644
--- a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
@@ -51,7 +51,7 @@ static const struct sunxi_desc_pin suniv_f1c100s_pins[] = {
SUNXI_FUNCTION(0x3, "pwm0"), /* PWM0 */
SUNXI_FUNCTION(0x4, "i2s"), /* IN */
SUNXI_FUNCTION(0x5, "uart1"), /* RX */
- SUNXI_FUNCTION(0x6, "spi1")), /* MOSI */
+ SUNXI_FUNCTION(0x6, "spi1")), /* CLK */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
@@ -204,7 +204,7 @@ static const struct sunxi_desc_pin suniv_f1c100s_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd"), /* D20 */
- SUNXI_FUNCTION(0x3, "lvds1"), /* RX */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index d384d36098c2..a62c5dfe42d6 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -817,7 +817,7 @@ err_cpkg:
err_bus:
return status;
}
-module_init(ssam_core_init);
+subsys_initcall(ssam_core_init);
static void __exit ssam_core_exit(void)
{
diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c
index c1775db29efb..ec66fde28e75 100644
--- a/drivers/platform/surface/surface_gpe.c
+++ b/drivers/platform/surface/surface_gpe.c
@@ -100,6 +100,14 @@ static const struct dmi_system_id dmi_lid_device_table[] = {
.driver_data = (void *)lid_device_props_l4D,
},
{
+ .ident = "Surface Pro 8",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 8"),
+ },
+ .driver_data = (void *)lid_device_props_l4B,
+ },
+ {
.ident = "Surface Book 1",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 6b8b3ab8db48..3463629f8764 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -584,21 +584,6 @@ static struct platform_driver acerhdf_driver = {
.remove = acerhdf_remove,
};
-/* checks if str begins with start */
-static int str_starts_with(const char *str, const char *start)
-{
- unsigned long str_len = 0, start_len = 0;
-
- str_len = strlen(str);
- start_len = strlen(start);
-
- if (str_len >= start_len &&
- !strncmp(str, start, start_len))
- return 1;
-
- return 0;
-}
-
/* check hardware */
static int __init acerhdf_check_hardware(void)
{
@@ -651,9 +636,9 @@ static int __init acerhdf_check_hardware(void)
* check if actual hardware BIOS vendor, product and version
* IDs start with the strings of BIOS table entry
*/
- if (str_starts_with(vendor, bt->vendor) &&
- str_starts_with(product, bt->product) &&
- str_starts_with(version, bt->version)) {
+ if (strstarts(vendor, bt->vendor) &&
+ strstarts(product, bt->product) &&
+ strstarts(version, bt->version)) {
found = 1;
break;
}
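strstarts() lives in <linux/string.h> and is exactly the pattern the removed local helper open-coded:

	static inline bool strstarts(const char *str, const char *prefix)
	{
		return strncmp(str, prefix, strlen(prefix)) == 0;
	}

so the three checks collapse to direct calls with no behavior change.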
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index e9d0dbbb2887..fa4123dbdf7f 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -160,8 +160,10 @@ MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
static struct amd_pmc_dev pmc;
static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
-static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
+#ifdef CONFIG_SUSPEND
+static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
+#endif
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
@@ -325,6 +327,7 @@ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table
return 0;
}
+#ifdef CONFIG_SUSPEND
static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
{
struct smu_metrics table;
@@ -338,6 +341,7 @@ static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
dev_dbg(pdev->dev, "Last suspend in deepest state for %lluus\n",
table.timein_s0i3_lastcapture);
}
+#endif
#ifdef CONFIG_DEBUG_FS
static int smu_fw_info_show(struct seq_file *s, void *unused)
@@ -569,6 +573,7 @@ out_unlock:
return rc;
}
+#ifdef CONFIG_SUSPEND
static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
{
switch (dev->cpu_id) {
@@ -694,6 +699,7 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
.prepare = amd_pmc_s2idle_prepare,
.restore = amd_pmc_s2idle_restore,
};
+#endif
static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
@@ -733,6 +739,7 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
return 0;
}
+#ifdef CONFIG_SUSPEND
static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
{
int err;
@@ -753,6 +760,7 @@ static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
return 0;
}
+#endif
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
{
@@ -859,9 +867,11 @@ static int amd_pmc_probe(struct platform_device *pdev)
amd_pmc_get_smu_version(dev);
platform_set_drvdata(pdev, dev);
+#ifdef CONFIG_SUSPEND
err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
if (err)
dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
+#endif
amd_pmc_dbgfs_register(dev);
return 0;
@@ -875,7 +885,9 @@ static int amd_pmc_remove(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
+#ifdef CONFIG_SUSPEND
acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
+#endif
amd_pmc_dbgfs_unregister(dev);
pci_dev_put(dev->rdev);
mutex_destroy(&dev->lock);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 2104a2621e50..0e7fbed8a50d 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -371,10 +371,14 @@ static int asus_wmi_evaluate_method_buf(u32 method_id,
switch (obj->type) {
case ACPI_TYPE_BUFFER:
- if (obj->buffer.length > size)
+ if (obj->buffer.length > size) {
err = -ENOSPC;
- if (obj->buffer.length == 0)
+ break;
+ }
+ if (obj->buffer.length == 0) {
err = -ENODATA;
+ break;
+ }
memcpy(ret_buffer, obj->buffer.pointer, obj->buffer.length);
break;
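Before this hunk, the two length checks only assigned err and fell through, so an oversized or empty buffer still reached the memcpy() below (and -ENOSPC could be silently overwritten by -ENODATA). The bug shape, reduced to a sketch:

	switch (type) {
	case BUFFER:
		if (len > size)
			err = -ENOSPC;	/* BUG: missing break */
		memcpy(dst, src, len);	/* still executes, overruns dst */
		break;
	}

Each error path now breaks out of the switch before the copy.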
@@ -2223,9 +2227,10 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
err = fan_curve_get_factory_default(asus, fan_dev);
if (err) {
- if (err == -ENODEV || err == -ENODATA)
- return 0;
- return err;
+ pr_debug("fan_curve_get_factory_default(0x%08x) failed: %d\n",
+ fan_dev, err);
+ /* Don't cause probe to fail on devices without fan-curves */
+ return 0;
}
*available = true;
diff --git a/drivers/platform/x86/barco-p50-gpio.c b/drivers/platform/x86/barco-p50-gpio.c
index f5c72e33f9ae..05534287bc26 100644
--- a/drivers/platform/x86/barco-p50-gpio.c
+++ b/drivers/platform/x86/barco-p50-gpio.c
@@ -10,7 +10,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/err.h>
diff --git a/drivers/platform/x86/dell/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c
index 8230e7a68a5e..1321687d923e 100644
--- a/drivers/platform/x86/dell/dell-laptop.c
+++ b/drivers/platform/x86/dell/dell-laptop.c
@@ -80,6 +80,10 @@ static struct quirk_entry quirk_dell_inspiron_1012 = {
.kbd_led_not_present = true,
};
+static struct quirk_entry quirk_dell_latitude_7520 = {
+ .kbd_missing_ac_tag = true,
+};
+
static struct platform_driver platform_driver = {
.driver = {
.name = "dell-laptop",
@@ -336,6 +340,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
},
.driver_data = &quirk_dell_inspiron_1012,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Latitude 7520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 7520"),
+ },
+ .driver_data = &quirk_dell_latitude_7520,
+ },
{ }
};
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 658bab4b7964..e87a931eab1e 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -148,6 +148,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660 GAMING X DDR4"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index a46d3b53bf61..7a059e02c265 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -236,7 +236,7 @@ enum ppfear_regs {
#define ADL_LPM_STATUS_LATCH_EN_OFFSET 0x1704
#define ADL_LPM_LIVE_STATUS_OFFSET 0x1764
-const char *pmc_lpm_modes[] = {
+static const char *pmc_lpm_modes[] = {
"S0i2.0",
"S0i2.1",
"S0i2.2",
diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
index 6b6f3e2a617a..f73ecfd4a309 100644
--- a/drivers/platform/x86/intel/pmt/telemetry.c
+++ b/drivers/platform/x86/intel/pmt/telemetry.c
@@ -103,7 +103,7 @@ static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxilia
auxiliary_set_drvdata(auxdev, priv);
for (i = 0; i < intel_vsec_dev->num_resources; i++) {
- struct intel_pmt_entry *entry = &priv->entry[i];
+ struct intel_pmt_entry *entry = &priv->entry[priv->num_entries];
ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i);
if (ret < 0)
diff --git a/drivers/platform/x86/intel/sdsi.c b/drivers/platform/x86/intel/sdsi.c
index 11d14cc0ff0a..c830e98dfa38 100644
--- a/drivers/platform/x86/intel/sdsi.c
+++ b/drivers/platform/x86/intel/sdsi.c
@@ -51,6 +51,8 @@
#define MBOX_TIMEOUT_US 2000
#define MBOX_TIMEOUT_ACQUIRE_US 1000
#define MBOX_POLLING_PERIOD_US 100
+#define MBOX_ACQUIRE_NUM_RETRIES 5
+#define MBOX_ACQUIRE_RETRY_DELAY_MS 500
#define MBOX_MAX_PACKETS 4
#define MBOX_OWNER_NONE 0x00
@@ -81,7 +83,7 @@ enum sdsi_command {
struct sdsi_mbox_info {
u64 *payload;
- u64 *buffer;
+ void *buffer;
int size;
};
@@ -163,9 +165,7 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf
total = 0;
loop = 0;
do {
- int offset = SDSI_SIZE_MAILBOX * loop;
- void __iomem *addr = priv->mbox_addr + offset;
- u64 *buf = info->buffer + offset / SDSI_SIZE_CMD;
+ void *buf = info->buffer + (SDSI_SIZE_MAILBOX * loop);
u32 packet_size;
/* Poll on ready bit */
@@ -196,7 +196,7 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf
break;
}
- sdsi_memcpy64_fromio(buf, addr, round_up(packet_size, SDSI_SIZE_CMD));
+ sdsi_memcpy64_fromio(buf, priv->mbox_addr, round_up(packet_size, SDSI_SIZE_CMD));
total += packet_size;
@@ -243,8 +243,8 @@ static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *in
FIELD_PREP(CTRL_PACKET_SIZE, info->size);
writeq(control, priv->control_addr);
- /* Poll on run_busy bit */
- ret = readq_poll_timeout(priv->control_addr, control, !(control & CTRL_RUN_BUSY),
+ /* Poll on ready bit */
+ ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);
if (ret)
@@ -263,7 +263,7 @@ static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info
{
u64 control;
u32 owner;
- int ret;
+ int ret, retries = 0;
lockdep_assert_held(&priv->mb_lock);
@@ -273,13 +273,29 @@ static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info
if (owner != MBOX_OWNER_NONE)
return -EBUSY;
- /* Write first qword of payload */
- writeq(info->payload[0], priv->mbox_addr);
+ /*
+ * If there has been no recent transaction and no one owns the mailbox,
+ * we should acquire it in under 1ms. However, if we've accessed it
+ * recently it may take up to 2.1 seconds to acquire it again.
+ */
+ do {
+ /* Write first qword of payload */
+ writeq(info->payload[0], priv->mbox_addr);
+
+ /* Check for ownership */
+ ret = readq_poll_timeout(priv->control_addr, control,
+ FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_INBAND,
+ MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_ACQUIRE_US);
+
+ if (FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_NONE &&
+ retries++ < MBOX_ACQUIRE_NUM_RETRIES) {
+ msleep(MBOX_ACQUIRE_RETRY_DELAY_MS);
+ continue;
+ }
- /* Check for ownership */
- ret = readq_poll_timeout(priv->control_addr, control,
- FIELD_GET(CTRL_OWNER, control) & MBOX_OWNER_INBAND,
- MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_ACQUIRE_US);
+ /* Either we got it or someone else did. */
+ break;
+ } while (true);
return ret;
}
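The acquire path retries only in the narrow case where the ownership field still reads MBOX_OWNER_NONE (the claim simply has not latched yet); if another agent took the mailbox it gives up immediately. A generic bounded-retry skeleton of the same shape, with hypothetical names:

	int retries = 0;

	do {
		write_claim();			/* hypothetical */
		if (read_owner() == OWNER_ME)
			return 0;
		if (read_owner() == OWNER_NONE &&
		    retries++ < MAX_RETRIES) {
			msleep(RETRY_DELAY_MS);
			continue;
		}
		break;	/* taken by someone else, or retries exhausted */
	} while (true);

	return -EBUSY;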
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
index c61f804dd44e..8f9c571d7257 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -212,6 +212,9 @@ static int __init intel_uncore_init(void)
const struct x86_cpu_id *id;
int ret;
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+ return -ENODEV;
+
id = x86_match_cpu(intel_uncore_cpu_ids);
if (!id)
return -ENODEV;
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index c1d9ed9b7b67..19f6b456234f 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
if (value > samsung->kbd_led.max_brightness)
value = samsung->kbd_led.max_brightness;
- else if (value < 0)
- value = 0;
samsung->kbd_led_wk = value;
queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index bce17ca97947..a01a92769c1a 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -740,16 +740,8 @@ static ssize_t certificate_store(struct kobject *kobj,
if (!tlmi_priv.certificate_support)
return -EOPNOTSUPP;
- new_cert = kstrdup(buf, GFP_KERNEL);
- if (!new_cert)
- return -ENOMEM;
- /* Strip out CR if one is present */
- strip_cr(new_cert);
-
/* If empty then clear installed certificate */
- if (new_cert[0] == '\0') { /* Clear installed certificate */
- kfree(new_cert);
-
+ if ((buf[0] == '\0') || (buf[0] == '\n')) { /* Clear installed certificate */
/* Check that signature is set */
if (!setting->signature || !setting->signature[0])
return -EACCES;
@@ -763,14 +755,16 @@ static ssize_t certificate_store(struct kobject *kobj,
ret = tlmi_simple_call(LENOVO_CLEAR_BIOS_CERT_GUID, auth_str);
kfree(auth_str);
- if (ret)
- return ret;
- kfree(setting->certificate);
- setting->certificate = NULL;
- return count;
+ return ret ?: count;
}
+ new_cert = kstrdup(buf, GFP_KERNEL);
+ if (!new_cert)
+ return -ENOMEM;
+ /* Strip out CR if one is present */
+ strip_cr(new_cert);
+
if (setting->cert_installed) {
/* Certificate is installed so this is an update */
if (!setting->signature || !setting->signature[0]) {
@@ -792,21 +786,14 @@ static ssize_t certificate_store(struct kobject *kobj,
auth_str = kasprintf(GFP_KERNEL, "%s,%s",
new_cert, setting->password);
}
- if (!auth_str) {
- kfree(new_cert);
+ kfree(new_cert);
+ if (!auth_str)
return -ENOMEM;
- }
ret = tlmi_simple_call(guid, auth_str);
kfree(auth_str);
- if (ret) {
- kfree(new_cert);
- return ret;
- }
- kfree(setting->certificate);
- setting->certificate = new_cert;
- return count;
+ return ret ?: count;
}
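The `?:` here is the GNU conditional with the middle operand omitted, the usual sysfs store convention: propagate the error if the WMI call failed, otherwise report the whole write as consumed.

	return ret ?: count;	/* same as: return ret ? ret : count; */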
static struct kobj_attribute auth_certificate = __ATTR_WO(certificate);
@@ -1194,6 +1181,10 @@ static void tlmi_release_attr(void)
kset_unregister(tlmi_priv.attribute_kset);
+ /* Free up any saved signatures */
+ kfree(tlmi_priv.pwd_admin->signature);
+ kfree(tlmi_priv.pwd_admin->save_signature);
+
/* Authentication structures */
sysfs_remove_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group);
kobject_put(&tlmi_priv.pwd_admin->kobj);
@@ -1210,11 +1201,6 @@ static void tlmi_release_attr(void)
}
kset_unregister(tlmi_priv.authentication_kset);
-
- /* Free up any saved certificates/signatures */
- kfree(tlmi_priv.pwd_admin->certificate);
- kfree(tlmi_priv.pwd_admin->signature);
- kfree(tlmi_priv.pwd_admin->save_signature);
}
static int tlmi_sysfs_init(void)
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
index 4f69df6eed07..4daba6151cd6 100644
--- a/drivers/platform/x86/think-lmi.h
+++ b/drivers/platform/x86/think-lmi.h
@@ -63,7 +63,6 @@ struct tlmi_pwd_setting {
int index; /*Used for HDD and NVME auth */
enum level_option level;
bool cert_installed;
- char *certificate;
char *signature;
char *save_signature;
};
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index c568fae56db2..e6cb4a14cdd4 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -309,6 +309,20 @@ struct ibm_init_struct {
struct ibm_struct *data;
};
+/* DMI Quirks */
+struct quirk_entry {
+ bool btusb_bug;
+ u32 s2idle_bug_mmio;
+};
+
+static struct quirk_entry quirk_btusb_bug = {
+ .btusb_bug = true,
+};
+
+static struct quirk_entry quirk_s2idle_bug = {
+ .s2idle_bug_mmio = 0xfed80380,
+};
+
static struct {
u32 bluetooth:1;
u32 hotkey:1;
@@ -338,6 +352,7 @@ static struct {
u32 hotkey_poll_active:1;
u32 has_adaptive_kbd:1;
u32 kbd_lang:1;
+ struct quirk_entry *quirks;
} tp_features;
static struct {
@@ -4359,9 +4374,10 @@ static void bluetooth_exit(void)
bluetooth_shutdown();
}
-static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+static const struct dmi_system_id fwbug_list[] __initconst = {
{
.ident = "ThinkPad E485",
+ .driver_data = &quirk_btusb_bug,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20KU"),
@@ -4369,6 +4385,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
},
{
.ident = "ThinkPad E585",
+ .driver_data = &quirk_btusb_bug,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20KV"),
@@ -4376,6 +4393,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
},
{
.ident = "ThinkPad A285 - 20MW",
+ .driver_data = &quirk_btusb_bug,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20MW"),
@@ -4383,6 +4401,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
},
{
.ident = "ThinkPad A285 - 20MX",
+ .driver_data = &quirk_btusb_bug,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20MX"),
@@ -4390,6 +4409,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
},
{
.ident = "ThinkPad A485 - 20MU",
+ .driver_data = &quirk_btusb_bug,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20MU"),
@@ -4397,14 +4417,125 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
},
{
.ident = "ThinkPad A485 - 20MV",
+ .driver_data = &quirk_btusb_bug,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20MV"),
},
},
+ {
+ .ident = "L14 Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20X5"),
+ }
+ },
+ {
+ .ident = "T14s Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XF"),
+ }
+ },
+ {
+ .ident = "X13 Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XH"),
+ }
+ },
+ {
+ .ident = "T14 Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XK"),
+ }
+ },
+ {
+ .ident = "T14 Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UD"),
+ }
+ },
+ {
+ .ident = "T14 Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UE"),
+ }
+ },
+ {
+ .ident = "T14s Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
+ }
+ },
+ {
+ .ident = "P14s Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"),
+ }
+ },
+ {
+ .ident = "P14s Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21A0"),
+ }
+ },
{}
};
+#ifdef CONFIG_SUSPEND
+/*
+ * Lenovo laptops from a variety of generations run an SMI handler during the
+ * D3->D0 transition that occurs specifically when exiting suspend to idle,
+ * which can cause large delays during resume when the IOMMU translation
+ * layer is enabled (the default behavior) for NVME devices.
+ *
+ * To avoid this firmware problem, skip the SMI handler on these machines before the
+ * D0 transition occurs.
+ */
+static void thinkpad_acpi_amd_s2idle_restore(void)
+{
+ struct resource *res;
+ void __iomem *addr;
+ u8 val;
+
+ res = request_mem_region_muxed(tp_features.quirks->s2idle_bug_mmio, 1,
+ "thinkpad_acpi_pm80");
+ if (!res)
+ return;
+
+ addr = ioremap(tp_features.quirks->s2idle_bug_mmio, 1);
+ if (!addr)
+ goto cleanup_resource;
+
+ val = ioread8(addr);
+ iowrite8(val & ~BIT(0), addr);
+
+ iounmap(addr);
+cleanup_resource:
+ release_resource(res);
+}
+
+static struct acpi_s2idle_dev_ops thinkpad_acpi_s2idle_dev_ops = {
+ .restore = thinkpad_acpi_amd_s2idle_restore,
+};
+#endif
+
static const struct pci_device_id fwbug_cards_ids[] __initconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24FD) },
@@ -4419,7 +4550,8 @@ static int __init have_bt_fwbug(void)
* Some AMD-based ThinkPads have a firmware bug where calling
* "GBDC" causes bluetooth on Intel wireless cards to be blocked
*/
- if (dmi_check_system(bt_fwbug_list) && pci_dev_present(fwbug_cards_ids)) {
+ if (tp_features.quirks && tp_features.quirks->btusb_bug &&
+ pci_dev_present(fwbug_cards_ids)) {
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
FW_BUG "disable bluetooth subdriver for Intel cards\n");
return 1;
@@ -8748,24 +8880,27 @@ static int __init fan_init(struct ibm_init_struct *iibm)
fan_status_access_mode = TPACPI_FAN_RD_TPEC;
if (quirks & TPACPI_FAN_Q1)
fan_quirk1_setup();
- if (quirks & TPACPI_FAN_2FAN) {
- tp_features.second_fan = 1;
- pr_info("secondary fan support enabled\n");
- }
- if (quirks & TPACPI_FAN_2CTL) {
- tp_features.second_fan = 1;
- tp_features.second_fan_ctl = 1;
- pr_info("secondary fan control enabled\n");
- }
/* Try and probe the 2nd fan */
+ tp_features.second_fan = 1; /* needed for get_speed to work */
res = fan2_get_speed(&speed);
if (res >= 0) {
/* It responded - so let's assume it's there */
tp_features.second_fan = 1;
tp_features.second_fan_ctl = 1;
pr_info("secondary fan control detected & enabled\n");
+ } else {
+ /* Fan not auto-detected */
+ tp_features.second_fan = 0;
+ if (quirks & TPACPI_FAN_2FAN) {
+ tp_features.second_fan = 1;
+ pr_info("secondary fan support enabled\n");
+ }
+ if (quirks & TPACPI_FAN_2CTL) {
+ tp_features.second_fan = 1;
+ tp_features.second_fan_ctl = 1;
+ pr_info("secondary fan control enabled\n");
+ }
}
-
} else {
pr_err("ThinkPad ACPI EC access misbehaving, fan status and control unavailable\n");
return -ENODEV;
@@ -11455,6 +11590,10 @@ static void thinkpad_acpi_module_exit(void)
tpacpi_lifecycle = TPACPI_LIFE_EXITING;
+#ifdef CONFIG_SUSPEND
+ if (tp_features.quirks && tp_features.quirks->s2idle_bug_mmio)
+ acpi_unregister_lps0_dev(&thinkpad_acpi_s2idle_dev_ops);
+#endif
if (tpacpi_hwmon)
hwmon_device_unregister(tpacpi_hwmon);
if (tp_features.sensors_pdrv_registered)
@@ -11496,6 +11635,7 @@ static void thinkpad_acpi_module_exit(void)
static int __init thinkpad_acpi_module_init(void)
{
+ const struct dmi_system_id *dmi_id;
int ret, i;
tpacpi_lifecycle = TPACPI_LIFE_INIT;
@@ -11535,6 +11675,10 @@ static int __init thinkpad_acpi_module_init(void)
return -ENODEV;
}
+ dmi_id = dmi_first_match(fwbug_list);
+ if (dmi_id)
+ tp_features.quirks = dmi_id->driver_data;
+
/* Device initialization */
tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, -1,
NULL, 0);
@@ -11623,6 +11767,13 @@ static int __init thinkpad_acpi_module_init(void)
tp_features.input_device_registered = 1;
}
+#ifdef CONFIG_SUSPEND
+ if (tp_features.quirks && tp_features.quirks->s2idle_bug_mmio) {
+ if (!acpi_register_lps0_dev(&thinkpad_acpi_s2idle_dev_ops))
+ pr_info("Using s2idle quirk to avoid %s platform firmware bug\n",
+ (dmi_id && dmi_id->ident) ? dmi_id->ident : "");
+ }
+#endif
return 0;
}
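The module init path now resolves quirks exactly once: dmi_first_match() walks fwbug_list in order and returns the first entry whose DMI fields all match, and that entry's driver_data points at one of the quirk_entry structs defined earlier. The usage shape, as a sketch:

	const struct dmi_system_id *dmi_id = dmi_first_match(fwbug_list);

	if (dmi_id)
		tp_features.quirks = dmi_id->driver_data;

	/* later: if (tp_features.quirks && tp_features.quirks->btusb_bug) ... */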
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index ea02c8dcd748..d925cb137e12 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -604,6 +604,12 @@ int power_supply_get_battery_info(struct power_supply *psy,
err = samsung_sdi_battery_get_info(&psy->dev, value, &info);
if (!err)
goto out_ret_pointer;
+ else if (err == -ENODEV)
+ /*
+ * Device does not have a static battery.
+ * Proceed to look for a simple battery.
+ */
+ err = 0;
if (strcmp("simple-battery", value)) {
err = -ENODEV;
diff --git a/drivers/power/supply/samsung-sdi-battery.c b/drivers/power/supply/samsung-sdi-battery.c
index 9d59f277f519..b33daab798b9 100644
--- a/drivers/power/supply/samsung-sdi-battery.c
+++ b/drivers/power/supply/samsung-sdi-battery.c
@@ -824,6 +824,7 @@ static struct samsung_sdi_battery samsung_sdi_batteries[] = {
.constant_charge_current_max_ua = 900000,
.constant_charge_voltage_max_uv = 4200000,
.charge_term_current_ua = 200000,
+ .charge_restart_voltage_uv = 4170000,
.maintenance_charge = samsung_maint_charge_table,
.maintenance_charge_size = ARRAY_SIZE(samsung_maint_charge_table),
.alert_low_temp_charge_current_ua = 300000,
@@ -867,6 +868,7 @@ static struct samsung_sdi_battery samsung_sdi_batteries[] = {
.constant_charge_current_max_ua = 1500000,
.constant_charge_voltage_max_uv = 4350000,
.charge_term_current_ua = 120000,
+ .charge_restart_voltage_uv = 4300000,
.maintenance_charge = samsung_maint_charge_table,
.maintenance_charge_size = ARRAY_SIZE(samsung_maint_charge_table),
.alert_low_temp_charge_current_ua = 300000,
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 0feaa4b45317..860672d6a03c 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -300,7 +300,7 @@ struct ptp_ocp {
struct platform_device *spi_flash;
struct clk_hw *i2c_clk;
struct timer_list watchdog;
- const struct ocp_attr_group *attr_tbl;
+ const struct attribute_group **attr_group;
const struct ptp_ocp_eeprom_map *eeprom_map;
struct dentry *debug_root;
time64_t gnss_lost;
@@ -841,7 +841,7 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u32 adj_val)
}
static void
-ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
+ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, s64 delta_ns)
{
struct timespec64 ts;
unsigned long flags;
@@ -850,7 +850,8 @@ ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
spin_lock_irqsave(&bp->lock, flags);
err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
if (likely(!err)) {
- timespec64_add_ns(&ts, delta_ns);
+ set_normalized_timespec64(&ts, ts.tv_sec,
+ ts.tv_nsec + delta_ns);
__ptp_ocp_settime_locked(bp, &ts);
}
spin_unlock_irqrestore(&bp->lock, flags);
@@ -1557,7 +1558,7 @@ ptp_ocp_signal_set(struct ptp_ocp *bp, int gen, struct ptp_ocp_signal *s)
start_ns = ktime_set(ts.tv_sec, ts.tv_nsec) + NSEC_PER_MSEC;
if (!s->start) {
/* roundup() does not work on 32-bit systems */
- s->start = DIV_ROUND_UP_ULL(start_ns, s->period);
+ s->start = DIV64_U64_ROUND_UP(start_ns, s->period);
s->start = ktime_add(s->start, s->phase);
}
@@ -1836,6 +1837,42 @@ ptp_ocp_signal_init(struct ptp_ocp *bp)
}
static void
+ptp_ocp_attr_group_del(struct ptp_ocp *bp)
+{
+ sysfs_remove_groups(&bp->dev.kobj, bp->attr_group);
+ kfree(bp->attr_group);
+}
+
+static int
+ptp_ocp_attr_group_add(struct ptp_ocp *bp,
+ const struct ocp_attr_group *attr_tbl)
+{
+ int count, i;
+ int err;
+
+ count = 0;
+ for (i = 0; attr_tbl[i].cap; i++)
+ if (attr_tbl[i].cap & bp->fw_cap)
+ count++;
+
+ bp->attr_group = kcalloc(count + 1, sizeof(struct attribute_group *),
+ GFP_KERNEL);
+ if (!bp->attr_group)
+ return -ENOMEM;
+
+ count = 0;
+ for (i = 0; attr_tbl[i].cap; i++)
+ if (attr_tbl[i].cap & bp->fw_cap)
+ bp->attr_group[count++] = attr_tbl[i].group;
+
+ err = sysfs_create_groups(&bp->dev.kobj, bp->attr_group);
+ if (err)
+ bp->attr_group[0] = NULL;
+
+ return err;
+}
+
+static void
ptp_ocp_sma_init(struct ptp_ocp *bp)
{
u32 reg;
@@ -1904,7 +1941,6 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
bp->flash_start = 1024 * 4096;
bp->eeprom_map = fb_eeprom_map;
bp->fw_version = ioread32(&bp->image->version);
- bp->attr_tbl = fb_timecard_groups;
bp->fw_cap = OCP_CAP_BASIC;
ver = bp->fw_version & 0xffff;
@@ -1918,6 +1954,10 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
ptp_ocp_sma_init(bp);
ptp_ocp_signal_init(bp);
+ err = ptp_ocp_attr_group_add(bp, fb_timecard_groups);
+ if (err)
+ return err;
+
err = ptp_ocp_fb_set_pins(bp);
if (err)
return err;
@@ -3388,7 +3428,6 @@ ptp_ocp_complete(struct ptp_ocp *bp)
{
struct pps_device *pps;
char buf[32];
- int i, err;
if (bp->gnss_port != -1) {
sprintf(buf, "ttyS%d", bp->gnss_port);
@@ -3413,14 +3452,6 @@ ptp_ocp_complete(struct ptp_ocp *bp)
if (pps)
ptp_ocp_symlink(bp, pps->dev, "pps");
- for (i = 0; bp->attr_tbl[i].cap; i++) {
- if (!(bp->attr_tbl[i].cap & bp->fw_cap))
- continue;
- err = sysfs_create_group(&bp->dev.kobj, bp->attr_tbl[i].group);
- if (err)
- return err;
- }
-
ptp_ocp_debugfs_add_device(bp);
return 0;
@@ -3492,15 +3523,11 @@ static void
ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
{
struct device *dev = &bp->dev;
- int i;
sysfs_remove_link(&dev->kobj, "ttyGNSS");
sysfs_remove_link(&dev->kobj, "ttyMAC");
sysfs_remove_link(&dev->kobj, "ptp");
sysfs_remove_link(&dev->kobj, "pps");
- if (bp->attr_tbl)
- for (i = 0; bp->attr_tbl[i].cap; i++)
- sysfs_remove_group(&dev->kobj, bp->attr_tbl[i].group);
}
static void
@@ -3510,6 +3537,7 @@ ptp_ocp_detach(struct ptp_ocp *bp)
ptp_ocp_debugfs_remove_device(bp);
ptp_ocp_detach_sysfs(bp);
+ ptp_ocp_attr_group_del(bp);
if (timer_pending(&bp->watchdog))
del_timer_sync(&bp->watchdog);
if (bp->ts0)
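The sysfs handling above moves from N individual sysfs_create_group() calls to one NULL-terminated pointer array, which is what makes the single-call teardown possible. The core of the pattern, sketched:

	/* one extra slot so the array stays NULL-terminated */
	groups = kcalloc(count + 1, sizeof(*groups), GFP_KERNEL);
	if (!groups)
		return -ENOMEM;
	/* ... fill groups[0..count-1] ... */
	err = sysfs_create_groups(&dev->kobj, groups);	/* all-or-nothing */
	/* ... on teardown: */
	sysfs_remove_groups(&dev->kobj, groups);

Note the error path in the patch clears attr_group[0], so a later ptp_ocp_attr_group_del() walks an empty list and removes nothing.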
diff --git a/drivers/regulator/atc260x-regulator.c b/drivers/regulator/atc260x-regulator.c
index 05147d2c3842..485e58b264c0 100644
--- a/drivers/regulator/atc260x-regulator.c
+++ b/drivers/regulator/atc260x-regulator.c
@@ -292,6 +292,7 @@ enum atc2603c_reg_ids {
.bypass_mask = BIT(5), \
.active_discharge_reg = ATC2603C_PMU_SWITCH_CTL, \
.active_discharge_mask = BIT(1), \
+ .active_discharge_on = BIT(1), \
.owner = THIS_MODULE, \
}
diff --git a/drivers/regulator/rtq2134-regulator.c b/drivers/regulator/rtq2134-regulator.c
index f21e3f8b21f2..8e13dea354a2 100644
--- a/drivers/regulator/rtq2134-regulator.c
+++ b/drivers/regulator/rtq2134-regulator.c
@@ -285,6 +285,7 @@ static const unsigned int rtq2134_buck_ramp_delay_table[] = {
.enable_mask = RTQ2134_VOUTEN_MASK, \
.active_discharge_reg = RTQ2134_REG_BUCK##_id##_CFG0, \
.active_discharge_mask = RTQ2134_ACTDISCHG_MASK, \
+ .active_discharge_on = RTQ2134_ACTDISCHG_MASK, \
.ramp_reg = RTQ2134_REG_BUCK##_id##_RSPCFG, \
.ramp_mask = RTQ2134_RSPUP_MASK, \
.ramp_delay_table = rtq2134_buck_ramp_delay_table, \
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index cadea0344486..40befdd9dfa9 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -82,6 +82,35 @@ static const struct regulator_desc wm8994_ldo_desc[] = {
.min_uV = 2400000,
.uV_step = 100000,
.enable_time = 3000,
+ .off_on_delay = 36000,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO2",
+ .id = 2,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = WM8994_LDO2_MAX_SELECTOR + 1,
+ .vsel_reg = WM8994_LDO_2,
+ .vsel_mask = WM8994_LDO2_VSEL_MASK,
+ .ops = &wm8994_ldo2_ops,
+ .enable_time = 3000,
+ .off_on_delay = 36000,
+ .owner = THIS_MODULE,
+ },
+};
+
+static const struct regulator_desc wm8958_ldo_desc[] = {
+ {
+ .name = "LDO1",
+ .id = 1,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = WM8994_LDO1_MAX_SELECTOR + 1,
+ .vsel_reg = WM8994_LDO_1,
+ .vsel_mask = WM8994_LDO1_VSEL_MASK,
+ .ops = &wm8994_ldo1_ops,
+ .min_uV = 2400000,
+ .uV_step = 100000,
+ .enable_time = 3000,
.owner = THIS_MODULE,
},
{
@@ -172,9 +201,16 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
* regulator core and we need not worry about it on the
* error path.
*/
- ldo->regulator = devm_regulator_register(&pdev->dev,
- &wm8994_ldo_desc[id],
- &config);
+ if (ldo->wm8994->type == WM8994) {
+ ldo->regulator = devm_regulator_register(&pdev->dev,
+ &wm8994_ldo_desc[id],
+ &config);
+ } else {
+ ldo->regulator = devm_regulator_register(&pdev->dev,
+ &wm8958_ldo_desc[id],
+ &config);
+ }
+
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
index 1e8315038850..a8dde4606360 100644
--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
@@ -121,7 +121,9 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->rstc),
"failed to get reset\n");
- reset_control_deassert(priv->rstc);
+ error = reset_control_deassert(priv->rstc);
+ if (error)
+ return error;
priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops;
priv->rcdev.of_reset_n_cells = 1;
diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
index 24d3395964cc..4c5bba52b105 100644
--- a/drivers/reset/tegra/reset-bpmp.c
+++ b/drivers/reset/tegra/reset-bpmp.c
@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
struct mrq_reset_request request;
struct tegra_bpmp_message msg;
+ int err;
memset(&request, 0, sizeof(request));
request.cmd = command;
@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
msg.tx.data = &request;
msg.tx.size = sizeof(request);
- return tegra_bpmp_transfer(bpmp, &msg);
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
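Both failure channels matter here: tegra_bpmp_transfer() reports IPC/transport errors as a negative errno, while msg.rx.ret carries the firmware's own verdict on the request. The old one-liner checked only the former and silently ignored firmware-rejected resets. The shape, as a sketch:

	err = tegra_bpmp_transfer(bpmp, &msg);
	if (err)		/* transport failed */
		return err;
	if (msg.rx.ret)		/* firmware rejected the request */
		return -EINVAL;
	return 0;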
static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 5b3e4da63406..5252ce4cbda4 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -370,6 +370,23 @@ CLK_OF_DECLARE_DRIVER(sun8i_h3_rtc_clk, "allwinner,sun8i-h3-rtc",
CLK_OF_DECLARE_DRIVER(sun50i_h5_rtc_clk, "allwinner,sun50i-h5-rtc",
sun8i_h3_rtc_clk_init);
+static const struct sun6i_rtc_clk_data sun50i_h6_rtc_data = {
+ .rc_osc_rate = 16000000,
+ .fixed_prescaler = 32,
+ .has_prescaler = 1,
+ .has_out_clk = 1,
+ .export_iosc = 1,
+ .has_losc_en = 1,
+ .has_auto_swt = 1,
+};
+
+static void __init sun50i_h6_rtc_clk_init(struct device_node *node)
+{
+ sun6i_rtc_clk_init(node, &sun50i_h6_rtc_data);
+}
+CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc",
+ sun50i_h6_rtc_clk_init);
+
/*
* The R40 user manual is self-conflicting on whether the prescaler is
* fixed or configurable. The clock diagram shows it as fixed, but there
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 8e87a31e329d..ba6d78789660 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1422,6 +1422,13 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
if (!cqr->lpm)
cqr->lpm = dasd_path_get_opm(device);
}
+ /*
+ * remember the number of formatted tracks to prevent a double format on
+ * ESE devices
+ */
+ if (cqr->block)
+ cqr->trkcount = atomic_read(&cqr->block->trkcount);
+
if (cqr->cpmode == 1) {
rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
(long) cqr, cqr->lpm);
@@ -1639,6 +1646,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
unsigned long now;
int nrf_suppressed = 0;
int fp_suppressed = 0;
+ struct request *req;
u8 *sense = NULL;
int expires;
@@ -1739,7 +1747,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
}
if (dasd_ese_needs_format(cqr->block, irb)) {
- if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
+ req = dasd_get_callback_data(cqr);
+ if (!req) {
+ cqr->status = DASD_CQR_ERROR;
+ return;
+ }
+ if (rq_data_dir(req) == READ) {
device->discipline->ese_read(cqr, irb);
cqr->status = DASD_CQR_SUCCESS;
cqr->stopclk = now;
@@ -2765,8 +2778,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
* complete a request partially.
*/
if (proc_bytes) {
- blk_update_request(req, BLK_STS_OK,
- blk_rq_bytes(req) - proc_bytes);
+ blk_update_request(req, BLK_STS_OK, proc_bytes);
blk_mq_requeue_request(req, true);
} else if (likely(!blk_should_fake_timeout(req->q))) {
blk_mq_complete_request(req);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 8410a25a65c1..836838f7d686 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1480,7 +1480,7 @@ static int dasd_eckd_pe_handler(struct dasd_device *device,
{
struct pe_handler_work_data *data;
- data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
+ data = kzalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
if (!data) {
if (mutex_trylock(&dasd_pe_handler_mutex)) {
data = pe_handler_worker;
@@ -1488,9 +1488,6 @@ static int dasd_eckd_pe_handler(struct dasd_device *device,
} else {
return -ENOMEM;
}
- } else {
- memset(data, 0, sizeof(*data));
- data->isglobal = 0;
}
INIT_WORK(&data->worker, do_pe_handler_work);
dasd_get_device(device);
@@ -3083,13 +3080,24 @@ static int dasd_eckd_format_device(struct dasd_device *base,
}
static bool test_and_set_format_track(struct dasd_format_entry *to_format,
- struct dasd_block *block)
+ struct dasd_ccw_req *cqr)
{
+ struct dasd_block *block = cqr->block;
struct dasd_format_entry *format;
unsigned long flags;
bool rc = false;
spin_lock_irqsave(&block->format_lock, flags);
+ if (cqr->trkcount != atomic_read(&block->trkcount)) {
+ /*
+ * The number of formatted tracks has changed after request
+ * start and we cannot tell if the current track was involved.
+ * To avoid data corruption, treat it as if the current track
+ * is involved.
+ */
+ rc = true;
+ goto out;
+ }
list_for_each_entry(format, &block->format_list, list) {
if (format->track == to_format->track) {
rc = true;
@@ -3109,6 +3117,7 @@ static void clear_format_track(struct dasd_format_entry *format,
unsigned long flags;
spin_lock_irqsave(&block->format_lock, flags);
+ atomic_inc(&block->trkcount);
list_del_init(&format->list);
spin_unlock_irqrestore(&block->format_lock, flags);
}
@@ -3145,7 +3154,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
sector_t curr_trk;
int rc;
- req = cqr->callback_data;
+ req = dasd_get_callback_data(cqr);
block = cqr->block;
base = block->base;
private = base->private;
@@ -3170,8 +3179,11 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
}
format->track = curr_trk;
/* test if track is already in formatting by another thread */
- if (test_and_set_format_track(format, block))
+ if (test_and_set_format_track(format, cqr)) {
+ /* this is not a real error, so do not count down retries */
+ cqr->retries++;
return ERR_PTR(-EEXIST);
+ }
fdata.start_unit = curr_trk;
fdata.stop_unit = curr_trk;
@@ -3270,12 +3282,11 @@ static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
cqr->proc_bytes = blk_count * blksize;
return 0;
}
- if (dst && !skip_block) {
- dst += off;
+ if (dst && !skip_block)
memset(dst, 0, blksize);
- } else {
+ else
skip_block--;
- }
+ dst += blksize;
blk_count++;
}
}
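Taken together, the dasd hunks above implement a generation-counter scheme: dasd_start_IO() snapshots block->trkcount into the request, clear_format_track() bumps the counter whenever a track finishes formatting, and test_and_set_format_track() treats any mismatch as "the current track may already have been formatted". A condensed sketch of the pattern with invented names, assuming the counter only ever increments:

#include <linux/atomic.h>

struct blockdev_example {
	atomic_t format_generation;	/* bumped per finished format */
};

struct request_example {
	unsigned int generation_snap;	/* taken at submit time */
};

static void submit_example(struct blockdev_example *b,
			   struct request_example *r)
{
	r->generation_snap = atomic_read(&b->format_generation);
}

static bool track_maybe_formatted_example(struct blockdev_example *b,
					  struct request_example *r)
{
	/*
	 * Any bump since submit means some track finished formatting and
	 * we cannot tell whether it was ours; err on the side of "yes"
	 * so the same track is never formatted (and zero-filled) twice.
	 */
	return r->generation_snap != atomic_read(&b->format_generation);
}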
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index e084f4dedddd..60be7f7bf2d1 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -782,7 +782,6 @@ static void dasd_fba_setup_blk_queue(struct dasd_block *block)
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
q->limits.discard_granularity = logical_block_size;
- q->limits.discard_alignment = PAGE_SIZE;
/* Calculate max_discard_sectors and make it PAGE aligned */
max_bytes = USHRT_MAX * logical_block_size;
@@ -791,7 +790,6 @@ static void dasd_fba_setup_blk_queue(struct dasd_block *block)
blk_queue_max_discard_sectors(q, max_discard_sectors);
blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}
static int dasd_fba_pe_handler(struct dasd_device *device,
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 3b7af00a7825..83b918b84b4a 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -187,6 +187,7 @@ struct dasd_ccw_req {
void (*callback)(struct dasd_ccw_req *, void *data);
void *callback_data;
unsigned int proc_bytes; /* bytes for partial completion */
+ unsigned int trkcount; /* count formatted tracks */
};
/*
@@ -610,6 +611,7 @@ struct dasd_block {
struct list_head format_list;
spinlock_t format_lock;
+ atomic_t trkcount;
};
struct dasd_attention_data {
@@ -756,6 +758,18 @@ dasd_check_blocksize(int bsize)
return 0;
}
+/*
+ * return the callback data of the original request in case there are
+ * ERP requests built on top of it
+ */
+static inline void *dasd_get_callback_data(struct dasd_ccw_req *cqr)
+{
+ while (cqr->refers)
+ cqr = cqr->refers;
+
+ return cqr->callback_data;
+}
+
/* externals in dasd.c */
#define DASD_PROFILE_OFF 0
#define DASD_PROFILE_ON 1
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 88abfb5e8045..8ac213a55141 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -626,8 +626,6 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
ctcm_clear_busy_do(dev);
}
- kfree(mpcginfo);
-
return;
}
@@ -1192,10 +1190,10 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
CTCM_FUNTAIL, dev->name);
priv->stats.rx_dropped++;
/* mpcginfo only used for non-data transfers */
- kfree(mpcginfo);
if (do_debug_data)
ctcmpc_dump_skb(pskb, -8);
}
+ kfree(mpcginfo);
}
done:
@@ -1977,7 +1975,6 @@ static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
}
break;
}
- kfree(mpcginfo);
CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n",
__func__, ch->id, grp->outstanding_xid2,
@@ -2038,7 +2035,6 @@ static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
mpc_validate_xid(mpcginfo);
break;
}
- kfree(mpcginfo);
return;
}
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index ded1930a00b2..e3813a7aa5e6 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -39,11 +39,12 @@ static ssize_t ctcm_buffer_write(struct device *dev,
struct ctcm_priv *priv = dev_get_drvdata(dev);
int rc;
- ndev = priv->channel[CTCM_READ]->netdev;
- if (!(priv && priv->channel[CTCM_READ] && ndev)) {
+ if (!(priv && priv->channel[CTCM_READ] &&
+ priv->channel[CTCM_READ]->netdev)) {
CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
return -ENODEV;
}
+ ndev = priv->channel[CTCM_READ]->netdev;
rc = kstrtouint(buf, 0, &bs1);
if (rc)
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index bab9b34926c6..84c8981317b4 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1736,10 +1736,11 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
lcs_schedule_recovery(card);
break;
case LCS_CMD_STOPLAN:
- pr_warn("Stoplan for %s initiated by LGW\n",
- card->dev->name);
- if (card->dev)
+ if (card->dev) {
+ pr_warn("Stoplan for %s initiated by LGW\n",
+ card->dev->name);
netif_carrier_off(card->dev);
+ }
break;
default:
LCS_DBF_TEXT(5, trace, "noLGWcmd");
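Both network hunks above (ctcm_sysfs.c and lcs.c) fix the same bug class: the pointer was dereferenced before the NULL check meant to guard it. In miniature, with hypothetical types:

struct card_example { struct net_device *dev; };

static void fixed_example(struct card_example *card)
{
	if (card->dev) {			/* check first... */
		pr_warn("Stoplan for %s\n", card->dev->name);
		netif_carrier_off(card->dev);	/* ...then use */
	}
}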
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 5f554a3a0f62..caeebfb67149 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -317,14 +317,18 @@ enum {
};
struct aha152x_cmd_priv {
- struct scsi_pointer scsi_pointer;
+ char *ptr;
+ int this_residual;
+ struct scatterlist *buffer;
+ int status;
+ int message;
+ int sent_command;
+ int phase;
};
-static struct scsi_pointer *aha152x_scsi_pointer(struct scsi_cmnd *cmd)
+static struct aha152x_cmd_priv *aha152x_priv(struct scsi_cmnd *cmd)
{
- struct aha152x_cmd_priv *acmd = scsi_cmd_priv(cmd);
-
- return &acmd->scsi_pointer;
+ return scsi_cmd_priv(cmd);
}
MODULE_AUTHOR("Jürgen Fischer");
@@ -890,17 +894,16 @@ void aha152x_release(struct Scsi_Host *shpnt)
static int setup_expected_interrupts(struct Scsi_Host *shpnt)
{
if(CURRENT_SC) {
- struct scsi_pointer *scsi_pointer =
- aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
- scsi_pointer->phase |= 1 << 16;
+ acp->phase |= 1 << 16;
- if (scsi_pointer->phase & selecting) {
+ if (acp->phase & selecting) {
SETPORT(SSTAT1, SELTO);
SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
SETPORT(SIMODE1, ENSELTIMO);
} else {
- SETPORT(SIMODE0, (scsi_pointer->phase & spiordy) ? ENSPIORDY : 0);
+ SETPORT(SIMODE0, (acp->phase & spiordy) ? ENSPIORDY : 0);
SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE);
}
} else if(STATE==seldi) {
@@ -924,17 +927,16 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
struct completion *complete, int phase)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(SCpnt);
+ struct aha152x_cmd_priv *acp = aha152x_priv(SCpnt);
struct Scsi_Host *shpnt = SCpnt->device->host;
unsigned long flags;
- scsi_pointer->phase = not_issued | phase;
- scsi_pointer->Status = 0x1; /* Ilegal status by SCSI standard */
- scsi_pointer->Message = 0;
- scsi_pointer->have_data_in = 0;
- scsi_pointer->sent_command = 0;
+ acp->phase = not_issued | phase;
+ acp->status = 0x1; /* Illegal status by SCSI standard */
+ acp->message = 0;
+ acp->sent_command = 0;
- if (scsi_pointer->phase & (resetting | check_condition)) {
+ if (acp->phase & (resetting | check_condition)) {
if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n");
return FAILED;
@@ -957,15 +959,15 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
SCp.phase : current state of the command */
if ((phase & resetting) || !scsi_sglist(SCpnt)) {
- scsi_pointer->ptr = NULL;
- scsi_pointer->this_residual = 0;
+ acp->ptr = NULL;
+ acp->this_residual = 0;
scsi_set_resid(SCpnt, 0);
- scsi_pointer->buffer = NULL;
+ acp->buffer = NULL;
} else {
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
- scsi_pointer->buffer = scsi_sglist(SCpnt);
- scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
- scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ acp->buffer = scsi_sglist(SCpnt);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
DO_LOCK(flags);
@@ -1015,7 +1017,7 @@ static void reset_done(struct scsi_cmnd *SCpnt)
static void aha152x_scsi_done(struct scsi_cmnd *SCpnt)
{
- if (aha152x_scsi_pointer(SCpnt)->phase & resetting)
+ if (aha152x_priv(SCpnt)->phase & resetting)
reset_done(SCpnt);
else
scsi_done(SCpnt);
@@ -1101,7 +1103,7 @@ static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
DO_LOCK(flags);
- if (aha152x_scsi_pointer(SCpnt)->phase & resetted) {
+ if (aha152x_priv(SCpnt)->phase & resetted) {
HOSTDATA(shpnt)->commands--;
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0);
@@ -1395,31 +1397,30 @@ static void busfree_run(struct Scsi_Host *shpnt)
SETPORT(SSTAT1, CLRBUSFREE);
if(CURRENT_SC) {
- struct scsi_pointer *scsi_pointer =
- aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
#if defined(AHA152X_STAT)
action++;
#endif
- scsi_pointer->phase &= ~syncneg;
+ acp->phase &= ~syncneg;
- if (scsi_pointer->phase & completed) {
+ if (acp->phase & completed) {
/* target sent COMMAND COMPLETE */
- done(shpnt, scsi_pointer->Status, DID_OK);
+ done(shpnt, acp->status, DID_OK);
- } else if (scsi_pointer->phase & aborted) {
- done(shpnt, scsi_pointer->Status, DID_ABORT);
+ } else if (acp->phase & aborted) {
+ done(shpnt, acp->status, DID_ABORT);
- } else if (scsi_pointer->phase & resetted) {
- done(shpnt, scsi_pointer->Status, DID_RESET);
+ } else if (acp->phase & resetted) {
+ done(shpnt, acp->status, DID_RESET);
- } else if (scsi_pointer->phase & disconnected) {
+ } else if (acp->phase & disconnected) {
/* target sent DISCONNECT */
#if defined(AHA152X_STAT)
HOSTDATA(shpnt)->disconnections++;
#endif
append_SC(&DISCONNECTED_SC, CURRENT_SC);
- scsi_pointer->phase |= 1 << 16;
+ acp->phase |= 1 << 16;
CURRENT_SC = NULL;
} else {
@@ -1438,24 +1439,23 @@ static void busfree_run(struct Scsi_Host *shpnt)
action++;
#endif
- if (aha152x_scsi_pointer(DONE_SC)->phase & check_condition) {
+ if (aha152x_priv(DONE_SC)->phase & check_condition) {
struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
struct aha152x_scdata *sc = SCDATA(cmd);
scsi_eh_restore_cmnd(cmd, &sc->ses);
- aha152x_scsi_pointer(cmd)->Status = SAM_STAT_CHECK_CONDITION;
+ aha152x_priv(cmd)->status = SAM_STAT_CHECK_CONDITION;
HOSTDATA(shpnt)->commands--;
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
- } else if (aha152x_scsi_pointer(DONE_SC)->Status ==
- SAM_STAT_CHECK_CONDITION) {
+ } else if (aha152x_priv(DONE_SC)->status == SAM_STAT_CHECK_CONDITION) {
#if defined(AHA152X_STAT)
HOSTDATA(shpnt)->busfree_with_check_condition++;
#endif
- if(!(aha152x_scsi_pointer(DONE_SC)->phase & not_issued)) {
+ if (!(aha152x_priv(DONE_SC)->phase & not_issued)) {
struct aha152x_scdata *sc;
struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
@@ -1480,7 +1480,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
- if (!(aha152x_scsi_pointer(ptr)->phase & resetting)) {
+ if (!(aha152x_priv(ptr)->phase & resetting)) {
kfree(ptr->host_scribble);
ptr->host_scribble=NULL;
}
@@ -1503,13 +1503,12 @@ static void busfree_run(struct Scsi_Host *shpnt)
DO_UNLOCK(flags);
if(CURRENT_SC) {
- struct scsi_pointer *scsi_pointer =
- aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
#if defined(AHA152X_STAT)
action++;
#endif
- scsi_pointer->phase |= selecting;
+ acp->phase |= selecting;
/* clear selection timeout */
SETPORT(SSTAT1, SELTO);
@@ -1537,13 +1536,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
*/
static void seldo_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
SETPORT(SCSISIG, 0);
SETPORT(SSTAT1, CLRBUSFREE);
SETPORT(SSTAT1, CLRPHASECHG);
- scsi_pointer->phase &= ~(selecting | not_issued);
+ acp->phase &= ~(selecting | not_issued);
SETPORT(SCSISEQ, 0);
@@ -1558,12 +1557,12 @@ static void seldo_run(struct Scsi_Host *shpnt)
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
- if (scsi_pointer->phase & aborting) {
+ if (acp->phase & aborting) {
ADDMSGO(ABORT);
- } else if (scsi_pointer->phase & resetting) {
+ } else if (acp->phase & resetting) {
ADDMSGO(BUS_DEVICE_RESET);
} else if (SYNCNEG==0 && SYNCHRONOUS) {
- scsi_pointer->phase |= syncneg;
+ acp->phase |= syncneg;
MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8);
SYNCNEG=1; /* negotiation in progress */
}
@@ -1578,7 +1577,7 @@ static void seldo_run(struct Scsi_Host *shpnt)
*/
static void selto_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp;
SETPORT(SCSISEQ, 0);
SETPORT(SSTAT1, CLRSELTIMO);
@@ -1586,9 +1585,10 @@ static void selto_run(struct Scsi_Host *shpnt)
if (!CURRENT_SC)
return;
- scsi_pointer->phase &= ~selecting;
+ acp = aha152x_priv(CURRENT_SC);
+ acp->phase &= ~selecting;
- if (scsi_pointer->phase & aborted)
+ if (acp->phase & aborted)
done(shpnt, SAM_STAT_GOOD, DID_ABORT);
else if (TESTLO(SSTAT0, SELINGO))
done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY);
@@ -1616,10 +1616,9 @@ static void seldi_run(struct Scsi_Host *shpnt)
SETPORT(SSTAT1, CLRPHASECHG);
if(CURRENT_SC) {
- struct scsi_pointer *scsi_pointer =
- aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
- if (!(scsi_pointer->phase & not_issued))
+ if (!(acp->phase & not_issued))
scmd_printk(KERN_ERR, CURRENT_SC,
"command should not have been issued yet\n");
@@ -1676,7 +1675,7 @@ static void seldi_run(struct Scsi_Host *shpnt)
static void msgi_run(struct Scsi_Host *shpnt)
{
for(;;) {
- struct scsi_pointer *scsi_pointer;
+ struct aha152x_cmd_priv *acp;
int sstat1 = GETPORT(SSTAT1);
if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT))
@@ -1714,9 +1713,9 @@ static void msgi_run(struct Scsi_Host *shpnt)
continue;
}
- scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
- scsi_pointer->Message = MSGI(0);
- scsi_pointer->phase &= ~disconnected;
+ acp = aha152x_priv(CURRENT_SC);
+ acp->message = MSGI(0);
+ acp->phase &= ~disconnected;
MSGILEN=0;
@@ -1724,8 +1723,8 @@ static void msgi_run(struct Scsi_Host *shpnt)
continue;
}
- scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
- scsi_pointer->Message = MSGI(0);
+ acp = aha152x_priv(CURRENT_SC);
+ acp->message = MSGI(0);
switch (MSGI(0)) {
case DISCONNECT:
@@ -1733,11 +1732,11 @@ static void msgi_run(struct Scsi_Host *shpnt)
scmd_printk(KERN_WARNING, CURRENT_SC,
"target was not allowed to disconnect\n");
- scsi_pointer->phase |= disconnected;
+ acp->phase |= disconnected;
break;
case COMMAND_COMPLETE:
- scsi_pointer->phase |= completed;
+ acp->phase |= completed;
break;
case MESSAGE_REJECT:
@@ -1867,11 +1866,9 @@ static void msgi_end(struct Scsi_Host *shpnt)
*/
static void msgo_init(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
-
if(MSGOLEN==0) {
- if ((scsi_pointer->phase & syncneg) && SYNCNEG==2 &&
- SYNCRATE==0) {
+ if ((aha152x_priv(CURRENT_SC)->phase & syncneg) &&
+ SYNCNEG == 2 && SYNCRATE == 0) {
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
} else {
scmd_printk(KERN_INFO, CURRENT_SC,
@@ -1888,7 +1885,7 @@ static void msgo_init(struct Scsi_Host *shpnt)
*/
static void msgo_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
while(MSGO_I<MSGOLEN) {
if (TESTLO(SSTAT0, SPIORDY))
@@ -1901,13 +1898,13 @@ static void msgo_run(struct Scsi_Host *shpnt)
if (MSGO(MSGO_I) & IDENTIFY_BASE)
- scsi_pointer->phase |= identified;
+ acp->phase |= identified;
if (MSGO(MSGO_I)==ABORT)
- scsi_pointer->phase |= aborted;
+ acp->phase |= aborted;
if (MSGO(MSGO_I)==BUS_DEVICE_RESET)
- scsi_pointer->phase |= resetted;
+ acp->phase |= resetted;
SETPORT(SCSIDAT, MSGO(MSGO_I++));
}
@@ -1936,7 +1933,7 @@ static void msgo_end(struct Scsi_Host *shpnt)
*/
static void cmd_init(struct Scsi_Host *shpnt)
{
- if (aha152x_scsi_pointer(CURRENT_SC)->sent_command) {
+ if (aha152x_priv(CURRENT_SC)->sent_command) {
scmd_printk(KERN_ERR, CURRENT_SC,
"command already sent\n");
done(shpnt, SAM_STAT_GOOD, DID_ERROR);
@@ -1967,7 +1964,7 @@ static void cmd_end(struct Scsi_Host *shpnt)
"command sent incompletely (%d/%d)\n",
CMD_I, CURRENT_SC->cmd_len);
else
- aha152x_scsi_pointer(CURRENT_SC)->sent_command++;
+ aha152x_priv(CURRENT_SC)->sent_command++;
}
/*
@@ -1979,7 +1976,7 @@ static void status_run(struct Scsi_Host *shpnt)
if (TESTLO(SSTAT0, SPIORDY))
return;
- aha152x_scsi_pointer(CURRENT_SC)->Status = GETPORT(SCSIDAT);
+ aha152x_priv(CURRENT_SC)->status = GETPORT(SCSIDAT);
}
@@ -2003,7 +2000,7 @@ static void datai_init(struct Scsi_Host *shpnt)
static void datai_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer;
+ struct aha152x_cmd_priv *acp;
unsigned long the_time;
int fifodata, data_count;
@@ -2041,36 +2038,35 @@ static void datai_run(struct Scsi_Host *shpnt)
fifodata = GETPORT(FIFOSTAT);
}
- scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
- if (scsi_pointer->this_residual > 0) {
- while (fifodata > 0 && scsi_pointer->this_residual > 0) {
- data_count = fifodata > scsi_pointer->this_residual ?
- scsi_pointer->this_residual :
- fifodata;
+ acp = aha152x_priv(CURRENT_SC);
+ if (acp->this_residual > 0) {
+ while (fifodata > 0 && acp->this_residual > 0) {
+ data_count = fifodata > acp->this_residual ?
+ acp->this_residual : fifodata;
fifodata -= data_count;
if (data_count & 1) {
SETPORT(DMACNTRL0, ENDMA|_8BIT);
- *scsi_pointer->ptr++ = GETPORT(DATAPORT);
- scsi_pointer->this_residual--;
+ *acp->ptr++ = GETPORT(DATAPORT);
+ acp->this_residual--;
DATA_LEN++;
SETPORT(DMACNTRL0, ENDMA);
}
if (data_count > 1) {
data_count >>= 1;
- insw(DATAPORT, scsi_pointer->ptr, data_count);
- scsi_pointer->ptr += 2 * data_count;
- scsi_pointer->this_residual -= 2 * data_count;
+ insw(DATAPORT, acp->ptr, data_count);
+ acp->ptr += 2 * data_count;
+ acp->this_residual -= 2 * data_count;
DATA_LEN += 2 * data_count;
}
- if (scsi_pointer->this_residual == 0 &&
- !sg_is_last(scsi_pointer->buffer)) {
+ if (acp->this_residual == 0 &&
+ !sg_is_last(acp->buffer)) {
/* advance to next buffer */
- scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
- scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
- scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ acp->buffer = sg_next(acp->buffer);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
}
} else if (fifodata > 0) {
@@ -2138,15 +2134,15 @@ static void datao_init(struct Scsi_Host *shpnt)
static void datao_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
unsigned long the_time;
int data_count;
/* until phase changes or all data sent */
- while (TESTLO(DMASTAT, INTSTAT) && scsi_pointer->this_residual > 0) {
+ while (TESTLO(DMASTAT, INTSTAT) && acp->this_residual > 0) {
data_count = 128;
- if (data_count > scsi_pointer->this_residual)
- data_count = scsi_pointer->this_residual;
+ if (data_count > acp->this_residual)
+ data_count = acp->this_residual;
if(TESTLO(DMASTAT, DFIFOEMP)) {
scmd_printk(KERN_ERR, CURRENT_SC,
@@ -2157,26 +2153,25 @@ static void datao_run(struct Scsi_Host *shpnt)
if(data_count & 1) {
SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT);
- SETPORT(DATAPORT, *scsi_pointer->ptr++);
- scsi_pointer->this_residual--;
+ SETPORT(DATAPORT, *acp->ptr++);
+ acp->this_residual--;
CMD_INC_RESID(CURRENT_SC, -1);
SETPORT(DMACNTRL0,WRITE_READ|ENDMA);
}
if(data_count > 1) {
data_count >>= 1;
- outsw(DATAPORT, scsi_pointer->ptr, data_count);
- scsi_pointer->ptr += 2 * data_count;
- scsi_pointer->this_residual -= 2 * data_count;
+ outsw(DATAPORT, acp->ptr, data_count);
+ acp->ptr += 2 * data_count;
+ acp->this_residual -= 2 * data_count;
CMD_INC_RESID(CURRENT_SC, -2 * data_count);
}
- if (scsi_pointer->this_residual == 0 &&
- !sg_is_last(scsi_pointer->buffer)) {
+ if (acp->this_residual == 0 && !sg_is_last(acp->buffer)) {
/* advance to next buffer */
- scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
- scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
- scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ acp->buffer = sg_next(acp->buffer);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
the_time=jiffies + 100*HZ;
@@ -2192,7 +2187,7 @@ static void datao_run(struct Scsi_Host *shpnt)
static void datao_end(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
if(TESTLO(DMASTAT, DFIFOEMP)) {
u32 datao_cnt = GETSTCNT();
@@ -2211,10 +2206,9 @@ static void datao_end(struct Scsi_Host *shpnt)
sg = sg_next(sg);
}
- scsi_pointer->buffer = sg;
- scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer) + done;
- scsi_pointer->this_residual = scsi_pointer->buffer->length -
- done;
+ acp->buffer = sg;
+ acp->ptr = SG_ADDRESS(acp->buffer) + done;
+ acp->this_residual = acp->buffer->length - done;
}
SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
@@ -2229,7 +2223,6 @@ static void datao_end(struct Scsi_Host *shpnt)
*/
static int update_state(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
int dataphase=0;
unsigned int stat0 = GETPORT(SSTAT0);
unsigned int stat1 = GETPORT(SSTAT1);
@@ -2244,7 +2237,7 @@ static int update_state(struct Scsi_Host *shpnt)
} else if (stat0 & SELDI && PREVSTATE == busfree) {
STATE=seldi;
} else if (stat0 & SELDO && CURRENT_SC &&
- (scsi_pointer->phase & selecting)) {
+ (aha152x_priv(CURRENT_SC)->phase & selecting)) {
STATE=seldo;
} else if(stat1 & SELTO) {
STATE=selto;
@@ -2376,8 +2369,7 @@ static void is_complete(struct Scsi_Host *shpnt)
SETPORT(SXFRCTL0, CH1);
SETPORT(DMACNTRL0, 0);
if(CURRENT_SC)
- aha152x_scsi_pointer(CURRENT_SC)->phase &=
- ~spiordy;
+ aha152x_priv(CURRENT_SC)->phase &= ~spiordy;
}
/*
@@ -2399,8 +2391,7 @@ static void is_complete(struct Scsi_Host *shpnt)
SETPORT(DMACNTRL0, 0);
SETPORT(SXFRCTL0, CH1|SPIOEN);
if(CURRENT_SC)
- aha152x_scsi_pointer(CURRENT_SC)->phase |=
- spiordy;
+ aha152x_priv(CURRENT_SC)->phase |= spiordy;
}
/*
@@ -2490,7 +2481,7 @@ static void disp_enintr(struct Scsi_Host *shpnt)
*/
static void show_command(struct scsi_cmnd *ptr)
{
- const int phase = aha152x_scsi_pointer(ptr)->phase;
+ const int phase = aha152x_priv(ptr)->phase;
scsi_print_command(ptr);
scmd_printk(KERN_DEBUG, ptr,
@@ -2538,8 +2529,8 @@ static void show_queues(struct Scsi_Host *shpnt)
static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(ptr);
- const int phase = scsi_pointer->phase;
+ struct aha152x_cmd_priv *acp = aha152x_priv(ptr);
+ const int phase = acp->phase;
int i;
seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ",
@@ -2549,8 +2540,8 @@ static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
seq_printf(m, "0x%02x ", ptr->cmnd[i]);
seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
- scsi_get_resid(ptr), scsi_pointer->this_residual,
- sg_nents(scsi_pointer->buffer) - 1);
+ scsi_get_resid(ptr), acp->this_residual,
+ sg_nents(acp->buffer) - 1);
if (phase & not_issued)
seq_puts(m, "not issued|");
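The aha152x rewrite follows the standard recipe for dropping the midlayer's shared scsi_pointer: declare a driver-private per-command struct, tell the midlayer its size through the host template's cmd_size field, and fetch it with scsi_cmd_priv(). A minimal sketch, assuming the usual SCSI midlayer API (the mydrv_* names are invented):

#include <linux/module.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct mydrv_cmd_priv {
	char *ptr;
	int this_residual;
	int phase;
};

/* The midlayer allocates cmd_size bytes behind every scsi_cmnd. */
static struct mydrv_cmd_priv *mydrv_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}

static struct scsi_host_template mydrv_template = {
	.module		= THIS_MODULE,
	.name		= "mydrv",
	.cmd_size	= sizeof(struct mydrv_cmd_priv),
};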
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 679a4fd13874..793fe19993a9 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -420,8 +420,6 @@ ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
/* config registers for header type 0 devices */
#define PCIR_MAPS 0x10
-#define PCIR_SUBVEND_0 0x2c
-#define PCIR_SUBDEV_0 0x2e
/****************************** PCI-X definitions *****************************/
#define PCIXR_COMMAND 0x96
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 2f0bdb9225a4..5fad41b1ab58 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -260,8 +260,8 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
- subvendor = ahd_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
- subdevice = ahd_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+ subvendor = ahd_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
+ subdevice = ahd_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2);
full_id = ahd_compose_id(device,
vendor,
subdevice,
@@ -298,7 +298,7 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
* Record if this is an HP board.
*/
subvendor = ahd_pci_read_config(ahd->dev_softc,
- PCIR_SUBVEND_0, /*bytes*/2);
+ PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
if (subvendor == SUBID_HP)
ahd->flags |= AHD_HP_BOARD;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index 4782a304e93c..51d9f4de0734 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -433,8 +433,6 @@ ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
/* config registers for header type 0 devices */
#define PCIR_MAPS 0x10
-#define PCIR_SUBVEND_0 0x2c
-#define PCIR_SUBDEV_0 0x2e
typedef enum
{
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index dab3a6d12c4d..2d4c85426dc3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -673,8 +673,8 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
- subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
- subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+ subvendor = ahc_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
+ subdevice = ahc_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2);
full_id = ahc_compose_id(device, vendor, subdevice, subvendor);
/*
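The deleted PCIR_SUBVEND_0/PCIR_SUBDEV_0 macros were private copies of config-space offsets the kernel already defines in <uapi/linux/pci_regs.h>: PCI_SUBSYSTEM_VENDOR_ID (0x2c) and PCI_SUBSYSTEM_ID (0x2e). Outside this driver's ahd/ahc wrappers, the generic read looks like this (sketch, hypothetical function name):

#include <linux/pci.h>

static void read_subsys_ids_example(struct pci_dev *pdev)
{
	u16 subvendor, subdevice;

	pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subvendor);
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subdevice);

	/* After enumeration the same values are also cached in
	 * pdev->subsystem_vendor and pdev->subsystem_device. */
}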
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 0103f811cc25..776544385598 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1169,7 +1169,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
ofld_kcqe->fcoe_conn_context_id);
interface = tgt->port->priv;
if (hba != interface->hba) {
- printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+ printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n");
goto ofld_cmpl_err;
}
/*
@@ -1226,12 +1226,12 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
* and enable
*/
if (tgt->context_id != context_id) {
- printk(KERN_ERR PFX "context id mis-match\n");
+ printk(KERN_ERR PFX "context id mismatch\n");
return;
}
interface = tgt->port->priv;
if (hba != interface->hba) {
- printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+ printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n");
goto enbl_cmpl_err;
}
if (!ofld_kcqe->completion_status)
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5521469ce678..6c864b093ac9 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1977,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
break;
- if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
+ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
if (nopin->op_code == ISCSI_OP_NOOP_IN &&
nopin->itt == (u16) RESERVED_ITT) {
printk(KERN_ALERT "bnx2i: Unsolicited "
@@ -2398,7 +2398,7 @@ static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
}
if (hba != ep->hba) {
- printk(KERN_ALERT "conn destroy- error hba mis-match\n");
+ printk(KERN_ALERT "conn destroy- error hba mismatch\n");
return;
}
@@ -2432,7 +2432,7 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
}
if (hba != ep->hba) {
- printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
+ printk(KERN_ALERT "ofld_cmpl: error hba mismatch\n");
return;
}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index fe86fd61a995..15fbd09baa94 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1721,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
/* Must suspend all rx queue activity for this ep */
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
}
/* CONN_DISCONNECT timeout may or may not be an issue depending
 * on what transpired in the TCP layer; different targets behave
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 8c7d4dda4cf2..4365d52c6430 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1634,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
log_debug(1 << CXGBI_DBG_PDU_RX,
"csk 0x%p, conn 0x%p.\n", csk, conn);
- if (unlikely(!conn || conn->suspend_rx)) {
+ if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
+ "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n",
csk, conn, conn ? conn->id : 0xFF,
- conn ? conn->suspend_rx : 0xFF);
+ conn ? conn->flags : 0xFF);
return;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 37d06f993b76..1d9be771f3ee 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1172,9 +1172,8 @@ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
case SCSI_ACCESS_STATE_OPTIMAL:
case SCSI_ACCESS_STATE_ACTIVE:
case SCSI_ACCESS_STATE_LBA:
- return BLK_STS_OK;
case SCSI_ACCESS_STATE_TRANSITIONING:
- return BLK_STS_AGAIN;
+ return BLK_STS_OK;
default:
req->rq_flags |= RQF_QUIET;
return BLK_STS_IOERR;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 461ef8a76c4c..4bda2f6cb352 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -442,7 +442,6 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_INTERNAL_ABORT:
hisi_sas_task_prep_abort(hisi_hba, slot);
break;
- fallthrough;
default:
return;
}
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 80238e6a3c98..eee1a24f7e15 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -36,7 +36,7 @@
#define IBMVSCSIS_VERSION "v0.2"
-#define INITIAL_SRP_LIMIT 800
+#define INITIAL_SRP_LIMIT 1024
#define DEFAULT_MAX_SECTORS 256
#define MAX_TXU 1024 * 1024
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index d690d9cf7eb1..35589b6af90d 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -413,7 +413,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
dev_warn(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received "
"event 0x%x for io request object "
- "that doesnt exist.\n",
+ "that doesn't exist.\n",
__func__,
ihost,
ent);
@@ -428,7 +428,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
dev_warn(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received "
"event 0x%x for remote device object "
- "that doesnt exist.\n",
+ "that doesn't exist.\n",
__func__,
ihost,
ent);
@@ -462,7 +462,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
} else
dev_err(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received event 0x%x "
- "for remote device object 0x%0x that doesnt "
+ "for remote device object 0x%0x that doesn't "
"exist.\n",
__func__,
ihost,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index d09926e6c8a8..797abf4f5399 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -678,7 +678,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
struct iscsi_task *task;
itt_t itt;
- if (session->state == ISCSI_STATE_TERMINATE)
+ if (session->state == ISCSI_STATE_TERMINATE ||
+ !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
return NULL;
if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
@@ -1392,8 +1393,8 @@ static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
return true;
}
@@ -1454,7 +1455,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
* Do this after dropping the extra ref because if this was a requeue
* it's removed from that list and cleanup_queued_task would miss it.
*/
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
/*
* Save the task and ref in case we weren't cleaning up this
* task and get woken up again.
@@ -1532,7 +1533,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
int rc = 0;
spin_lock_bh(&conn->session->frwd_lock);
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1746,7 +1747,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto fault;
}
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_REQUEUE << 16;
goto fault;
@@ -1935,7 +1936,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
void iscsi_suspend_queue(struct iscsi_conn *conn)
{
spin_lock_bh(&conn->session->frwd_lock);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
@@ -1953,7 +1954,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
struct Scsi_Host *shost = conn->session->host;
struct iscsi_host *ihost = shost_priv(shost);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
if (ihost->workq)
flush_workqueue(ihost->workq);
}
@@ -1961,7 +1962,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
static void iscsi_start_tx(struct iscsi_conn *conn)
{
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
iscsi_conn_queue_work(conn);
}
@@ -2214,6 +2215,8 @@ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
iscsi_suspend_tx(conn);
spin_lock_bh(&session->frwd_lock);
+ clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
+
if (!is_active) {
/*
* if logout timed out before userspace could even send a PDU
@@ -3045,7 +3048,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
- memset(conn, 0, sizeof(*conn) + dd_size);
conn->dd_data = cls_conn->dd_data + sizeof(*conn);
conn->session = session;
@@ -3318,6 +3320,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
spin_lock_bh(&session->frwd_lock);
if (is_leading)
session->leadconn = conn;
+
+ set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
spin_unlock_bh(&session->frwd_lock);
/*
@@ -3330,8 +3334,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
/*
* Unblock xmitworker(), Login Phase will pass through.
*/
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_conn_bind);
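The libiscsi churn above replaces the two dedicated suspend_tx/suspend_rx words with bit indices in a single conn->flags word, and adds ISCSI_CONN_FLAG_BOUND so PDU submission is refused between unbind and the next bind. The underlying idiom is plain atomic bitops; a sketch with invented names:

#include <linux/bitops.h>

enum conn_flag_example {		/* bit numbers, not masks */
	CONN_SUSPEND_TX_EXAMPLE,
	CONN_SUSPEND_RX_EXAMPLE,
	CONN_BOUND_EXAMPLE,
};

struct conn_example {
	unsigned long flags;		/* one word carries all flags */
};

static bool may_send_example(struct conn_example *c)
{
	/* set_bit()/clear_bit()/test_bit() are atomic, so no lock is
	 * needed just to flip or read an individual flag. */
	return test_bit(CONN_BOUND_EXAMPLE, &c->flags) &&
	       !test_bit(CONN_SUSPEND_TX_EXAMPLE, &c->flags);
}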
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 2e9ffe3d1a55..883005757ddb 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -927,7 +927,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
*/
conn->last_recv = jiffies;
- if (unlikely(conn->suspend_rx)) {
+ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
ISCSI_DBG_TCP(conn, "Rx suspended!\n");
*status = ISCSI_TCP_SUSPENDED;
return 0;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index f0cf8ffdc5f3..0025760230e5 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -897,6 +897,11 @@ enum lpfc_irq_chann_mode {
NHT_MODE,
};
+enum lpfc_hba_bit_flags {
+ FABRIC_COMANDS_BLOCKED,
+ HBA_PCI_ERR,
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
struct lpfc_io_buf * (*lpfc_get_scsi_buf)
@@ -1043,7 +1048,6 @@ struct lpfc_hba {
* Firmware supports Forced Link Speed
* capability
*/
-#define HBA_PCI_ERR 0x80000 /* The PCI slot is offline */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
#define HBA_SHORT_CMF 0x200000 /* shorter CMF timer routine */
#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */
@@ -1350,7 +1354,6 @@ struct lpfc_hba {
atomic_t fabric_iocb_count;
struct timer_list fabric_block_timer;
unsigned long bit_flags;
-#define FABRIC_COMANDS_BLOCKED 0
atomic_t num_rsrc_err;
atomic_t num_cmd_success;
unsigned long last_rsrc_error_time;
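Promoting HBA_PCI_ERR and FABRIC_COMANDS_BLOCKED from hba_flag masks to bit indices on bit_flags lets the PCI error paths use lock-free atomic bitops; in particular, test_and_set_bit() later in this series gives the error handler one-shot semantics. Sketch of that idiom (hypothetical names):

#include <linux/bitops.h>
#include <linux/printk.h>

enum hba_bit_example { HBA_PCI_ERR_EXAMPLE };

static void pci_error_detected_example(unsigned long *bit_flags)
{
	/*
	 * test_and_set_bit() atomically returns the previous value, so
	 * only the first error event runs the non-reentrant reset
	 * preparation; later events just note it is already underway.
	 */
	if (!test_and_set_bit(HBA_PCI_ERR_EXAMPLE, bit_flags))
		pr_info("preparing adapter for reset\n");
	else
		pr_info("PCI error already being handled\n");
}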
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 96408cd6c4c8..9897a1aa387b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -670,3 +670,6 @@ struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
uint32_t hash, uint8_t *buf);
void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
+
+void lpfc_sli_rpi_release(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index ef6e8cd8c26a..872a26376ccb 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1330,7 +1330,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_0) {
/* FLOGI needs to be 3 for WQE FCFI */
- ct = ((SLI4_CT_FCFI >> 1) & 1) | (SLI4_CT_FCFI & 1);
+ ct = SLI4_CT_FCFI;
bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
/* Set the fcfi to the fcfi we registered with */
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 0144da30e3db..2b877dff5ed4 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -109,8 +109,8 @@ lpfc_rport_invalid(struct fc_rport *rport)
ndlp = rdata->pnode;
if (!rdata->pnode) {
- pr_err("**** %s: NULL ndlp on rport x%px SID x%x\n",
- __func__, rport, rport->scsi_target_id);
+ pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
+ __func__, rport, rport->scsi_target_id);
return -EINVAL;
}
@@ -169,9 +169,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3181 dev_loss_callbk x%06x, rport x%px flg x%x "
- "load_flag x%x refcnt %d\n",
+ "load_flag x%x refcnt %d state %d xpt x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
- vport->load_flag, kref_read(&ndlp->kref));
+ vport->load_flag, kref_read(&ndlp->kref),
+ ndlp->nlp_state, ndlp->fc4_xpt_flags);
/* Don't schedule a worker thread event if the vport is going down.
* The teardown process cleans up the node via lpfc_drop_node.
@@ -181,6 +182,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->rport = NULL;
ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+ /* clear the NLP_XPT_REGD if the node is not registered
+ * with nvme-fc
+ */
+ if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
/* Remove the node reference from remote_port_add now.
* The driver will not call remote_port_delete.
@@ -225,18 +231,36 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->rport = NULL;
spin_unlock_irqrestore(&ndlp->lock, iflags);
- /* We need to hold the node by incrementing the reference
- * count until this queued work is done
- */
- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ if (phba->worker_thread) {
+ /* We need to hold the node by incrementing the reference
+ * count until this queued work is done
+ */
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (evtp->evt_arg1) {
+ evtp->evt = LPFC_EVT_DEV_LOSS;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ lpfc_worker_wake_up(phba);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ } else {
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3188 worker thread is stopped %s x%06x, "
+ " rport x%px flg x%x load_flag x%x refcnt "
+ "%d\n", __func__, ndlp->nlp_DID,
+ ndlp->rport, ndlp->nlp_flag,
+ vport->load_flag, kref_read(&ndlp->kref));
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ /* Node is in dev loss. No further transaction. */
+ ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (evtp->evt_arg1) {
- evtp->evt = LPFC_EVT_DEV_LOSS;
- list_add_tail(&evtp->evt_listp, &phba->work_list);
- lpfc_worker_wake_up(phba);
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
@@ -503,11 +527,12 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0203 Devloss timeout on "
"WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
- "NPort x%06x Data: x%x x%x x%x\n",
+ "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_state, ndlp->nlp_rpi);
+ ndlp->nlp_state, ndlp->nlp_rpi,
+ kref_read(&ndlp->kref));
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
"0204 Devloss timeout on "
@@ -755,18 +780,22 @@ lpfc_work_list_done(struct lpfc_hba *phba)
int free_evt;
int fcf_inuse;
uint32_t nlp_did;
+ bool hba_pci_err;
spin_lock_irq(&phba->hbalock);
while (!list_empty(&phba->work_list)) {
list_remove_head((&phba->work_list), evtp, typeof(*evtp),
evt_listp);
spin_unlock_irq(&phba->hbalock);
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
free_evt = 1;
switch (evtp->evt) {
case LPFC_EVT_ELS_RETRY:
ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
- lpfc_els_retry_delay_handler(ndlp);
- free_evt = 0; /* evt is part of ndlp */
+ if (!hba_pci_err) {
+ lpfc_els_retry_delay_handler(ndlp);
+ free_evt = 0; /* evt is part of ndlp */
+ }
/* decrement the node reference count held
* for this queued work
*/
@@ -788,8 +817,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
break;
case LPFC_EVT_RECOVER_PORT:
ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
- lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
- free_evt = 0;
+ if (!hba_pci_err) {
+ lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
+ free_evt = 0;
+ }
/* decrement the node reference count held for
* this queued work
*/
@@ -859,14 +890,18 @@ lpfc_work_done(struct lpfc_hba *phba)
struct lpfc_vport **vports;
struct lpfc_vport *vport;
int i;
+ bool hba_pci_err;
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
spin_lock_irq(&phba->hbalock);
ha_copy = phba->work_ha;
phba->work_ha = 0;
spin_unlock_irq(&phba->hbalock);
+ if (hba_pci_err)
+ ha_copy = 0;
/* First, try to post the next mailbox command to SLI4 device */
- if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
lpfc_sli4_post_async_mbox(phba);
if (ha_copy & HA_ERATT) {
@@ -886,7 +921,7 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_handle_latt(phba);
/* Handle VMID Events */
- if (lpfc_is_vmid_enabled(phba)) {
+ if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
if (phba->pport->work_port_events &
WORKER_CHECK_VMID_ISSUE_QFPA) {
lpfc_check_vmid_qfpa_issue(phba);
@@ -936,6 +971,8 @@ lpfc_work_done(struct lpfc_hba *phba)
work_port_events = vport->work_port_events;
vport->work_port_events &= ~work_port_events;
spin_unlock_irq(&vport->work_port_lock);
+ if (hba_pci_err)
+ continue;
if (work_port_events & WORKER_DISC_TMO)
lpfc_disc_timeout_handler(vport);
if (work_port_events & WORKER_ELS_TMO)
@@ -1173,12 +1210,14 @@ lpfc_linkdown(struct lpfc_hba *phba)
struct lpfc_vport **vports;
LPFC_MBOXQ_t *mb;
int i;
+ int offline;
if (phba->link_state == LPFC_LINK_DOWN)
return 0;
/* Block all SCSI stack I/Os */
lpfc_scsi_dev_block(phba);
+ offline = pci_channel_offline(phba->pcidev);
phba->defer_flogi_acc_flag = false;
@@ -1219,7 +1258,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
lpfc_destroy_vport_work_array(phba, vports);
/* Clean up any SLI3 firmware default rpi's */
- if (phba->sli_rev > LPFC_SLI_REV3)
+ if (phba->sli_rev > LPFC_SLI_REV3 || offline)
goto skip_unreg_did;
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -4712,6 +4751,11 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irqsave(&ndlp->lock, iflags);
if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0999 %s Not regd: ndlp x%px rport x%px DID "
+ "x%x FLG x%x XPT x%x\n",
+ __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->fc4_xpt_flags);
return;
}
@@ -4722,6 +4766,13 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
vport->phba->nport_event_cnt++;
lpfc_unregister_remote_port(ndlp);
+ } else if (!ndlp->rport) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "1999 %s NDLP in devloss x%px DID x%x FLG x%x"
+ " XPT x%x refcnt %d\n",
+ __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
}
if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
@@ -5371,6 +5422,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_flag &= ~NLP_UNREG_INP;
mempool_free(mbox, phba->mbox_mem_pool);
acc_plogi = 1;
+ lpfc_nlp_put(ndlp);
}
} else {
lpfc_printf_vlog(vport, KERN_INFO,
@@ -6097,12 +6149,34 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
}
}
+/*
+ * lpfc_notify_xport_npr - notifies xport of node disappearance
+ * @vport: Pointer to Virtual Port object.
+ *
+ * Transitions all ndlps to NPR state. When lpfc_nlp_set_state
+ * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered
+ * and the transport is notified that the node is gone.
+ * Return Code:
+ * none
+ */
+static void
+lpfc_notify_xport_npr(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+ nlp_listp) {
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ }
+}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
lpfc_els_flush_rscn(vport);
lpfc_els_flush_cmd(vport);
lpfc_disc_flush_list(vport);
+ if (pci_channel_offline(vport->phba->pcidev))
+ lpfc_notify_xport_npr(vport);
}
/*****************************************************************************/
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index eed6464bd880..461d333b1b3a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -95,6 +95,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
+static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1642,7 +1643,7 @@ lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
spin_lock_irq(&phba->hbalock);
if (phba->link_state == LPFC_HBA_ERROR &&
- phba->hba_flag & HBA_PCI_ERR) {
+ test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
spin_unlock_irq(&phba->hbalock);
return;
}
@@ -1985,6 +1986,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
if (pci_channel_offline(phba->pcidev)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3166 pci channel is offline\n");
+ lpfc_sli_flush_io_rings(phba);
return;
}
@@ -2973,6 +2975,22 @@ lpfc_cleanup(struct lpfc_vport *vport)
NLP_EVT_DEVICE_RM);
}
+ /* This is a special case flush to return all
+ * IOs before entering this loop. There are
+ * two points in the code where a flush is
+ * avoided if the FC_UNLOADING flag is set.
+ * One is in the multipool destroy
+ * (this prevents a crash) and the other is
+ * in the nvme abort handler (also prevents
+ * a crash). Both of these exceptions are
+ * cases where the slot is still accessible.
+ * The flush here is only when the pci slot
+ * is offline.
+ */
+ if (vport->load_flag & FC_UNLOADING &&
+ pci_channel_offline(phba->pcidev))
+ lpfc_sli_flush_io_rings(vport->phba);
+
/* At this point, ALL ndlp's should be gone
* because of the previous NLP_EVT_DEVICE_RM.
* Lets wait for this to happen, if needed.
@@ -2985,7 +3003,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
list_for_each_entry_safe(ndlp, next_ndlp,
&vport->fc_nodes, nlp_listp) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
- LOG_TRACE_EVENT,
+ LOG_DISCOVERY,
"0282 did:x%x ndlp:x%px "
"refcnt:%d xflags x%x nflag x%x\n",
ndlp->nlp_DID, (void *)ndlp,
@@ -3682,7 +3700,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
struct lpfc_vport **vports;
struct Scsi_Host *shost;
int i;
- int offline = 0;
+ int offline;
+ bool hba_pci_err;
if (vport->fc_flag & FC_OFFLINE_MODE)
return;
@@ -3692,6 +3711,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_linkdown(phba);
offline = pci_channel_offline(phba->pcidev);
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Issue an unreg_login to all nodes on all vports */
vports = lpfc_create_vport_work_array(phba);
@@ -3715,11 +3735,14 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(&ndlp->lock);
- if (offline) {
+ if (offline || hba_pci_err) {
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_UNREG_INP |
NLP_RPI_REGISTERED);
spin_unlock_irq(&ndlp->lock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli_rpi_release(vports[i],
+ ndlp);
} else {
lpfc_unreg_rpi(vports[i], ndlp);
}
@@ -13354,8 +13377,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Abort all iocbs associated with the hba */
lpfc_sli_hba_iocb_abort(phba);
- /* Wait for completion of device XRI exchange busy */
- lpfc_sli4_xri_exchange_busy_wait(phba);
+ if (!pci_channel_offline(phba->pcidev))
+ /* Wait for completion of device XRI exchange busy */
+ lpfc_sli4_xri_exchange_busy_wait(phba);
/* per-phba callback de-registration for hotplug event */
if (phba->pport)
@@ -13374,15 +13398,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Disable FW logging to host memory */
lpfc_ras_stop_fwlog(phba);
- /* Unset the queues shared with the hardware then release all
- * allocated resources.
- */
- lpfc_sli4_queue_unset(phba);
- lpfc_sli4_queue_destroy(phba);
-
/* Reset SLI4 HBA FCoE function */
lpfc_pci_function_reset(phba);
+ /* release all queue allocated resources. */
+ lpfc_sli4_queue_destroy(phba);
+
/* Free RAS DMA memory */
if (phba->ras_fwlog.ras_enabled)
lpfc_sli4_ras_dma_free(phba);
@@ -14262,6 +14283,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
"2711 PCI channel permanent disable for failure\n");
/* Block all SCSI devices' I/Os on the host */
lpfc_scsi_dev_block(phba);
+ lpfc_sli4_prep_dev_for_reset(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -15057,24 +15079,28 @@ lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2826 PCI channel disable preparing for reset\n");
+ int offline = pci_channel_offline(phba->pcidev);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2826 PCI channel disable preparing for reset offline"
+ " %d\n", offline);
/* Block any management I/Os to the device */
lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
- /* Block all SCSI devices' I/Os on the host */
- lpfc_scsi_dev_block(phba);
+ /* HBA_PCI_ERR was set in io_error_detect */
+ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
/* Flush all driver's outstanding I/Os as we are to reset */
lpfc_sli_flush_io_rings(phba);
+ lpfc_offline(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
+ lpfc_sli4_queue_destroy(phba);
/* Disable interrupt and pci device */
lpfc_sli4_disable_intr(phba);
- lpfc_sli4_queue_destroy(phba);
pci_disable_device(phba->pcidev);
}
@@ -15123,6 +15149,7 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ bool hba_pci_err;
switch (state) {
case pci_channel_io_normal:
@@ -15130,17 +15157,24 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
lpfc_sli4_prep_dev_for_recover(phba);
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
- phba->hba_flag |= HBA_PCI_ERR;
+ hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Fatal error, prepare for slot reset */
- lpfc_sli4_prep_dev_for_reset(phba);
+ if (!hba_pci_err)
+ lpfc_sli4_prep_dev_for_reset(phba);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2832 Already handling PCI error "
+ "state: x%x\n", state);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
- phba->hba_flag |= HBA_PCI_ERR;
+ set_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Permanent failure, prepare for device down */
lpfc_sli4_prep_dev_for_perm_failure(phba);
return PCI_ERS_RESULT_DISCONNECT;
default:
- phba->hba_flag |= HBA_PCI_ERR;
+ hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
+ if (!hba_pci_err)
+ lpfc_sli4_prep_dev_for_reset(phba);
/* Unknown state, prepare and request slot reset */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2825 Unknown PCI error state: x%x\n", state);
@@ -15174,17 +15208,21 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
struct lpfc_sli *psli = &phba->sli;
uint32_t intr_mode;
+ bool hba_pci_err;
dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
if (pci_enable_device_mem(pdev)) {
printk(KERN_ERR "lpfc: Cannot re-enable "
- "PCI device after reset.\n");
+ "PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_restore_state(pdev);
- phba->hba_flag &= ~HBA_PCI_ERR;
+ hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
+ if (!hba_pci_err)
+ dev_info(&pdev->dev,
+ "hba_pci_err was not set, recovering slot reset.\n");
/*
* As the new kernel behavior of pci_restore_state() API call clears
* device saved_state flag, need to save the restored state again.
@@ -15198,6 +15236,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
@@ -15239,8 +15279,6 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
*/
if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
/* Perform device reset */
- lpfc_offline_prep(phba, LPFC_MBX_WAIT);
- lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
/* Bring the device back online */
lpfc_online(phba);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 1213a299f9aa..8d26f207ebd2 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -93,6 +93,11 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
+
+ if (!vport || vport->load_flag & FC_UNLOADING ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ return -ENODEV;
+
qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
if (qhandle == NULL)
return -ENOMEM;
@@ -267,7 +272,8 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
return -EINVAL;
remoteport = lpfc_rport->remoteport;
- if (!vport->localport)
+ if (!vport->localport ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
return -EINVAL;
lport = vport->localport->private;
@@ -559,6 +565,8 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_DID, ntype, nstate);
return -ENODEV;
}
+ if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ return -ENODEV;
if (!vport->phba->sli4_hba.nvmels_wq)
return -ENOMEM;
@@ -662,7 +670,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
return -EINVAL;
vport = lport->vport;
- if (vport->load_flag & FC_UNLOADING)
+ if (vport->load_flag & FC_UNLOADING ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
return -ENODEV;
atomic_inc(&lport->fc4NvmeLsRequests);
@@ -1516,7 +1525,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
phba = vport->phba;
- if (unlikely(vport->load_flag & FC_UNLOADING)) {
+ if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
+ phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
atomic_inc(&lport->xmt_fcp_err);
@@ -2169,8 +2179,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
abts_nvme = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
qp = &phba->sli4_hba.hdwq[i];
- if (!vport || !vport->localport ||
- !qp || !qp->io_wq)
+ if (!vport->localport || !qp || !qp->io_wq)
return;
pring = qp->io_wq->pring;
@@ -2180,8 +2189,9 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
abts_scsi += qp->abts_scsi_io_bufs;
abts_nvme += qp->abts_nvme_io_bufs;
}
- if (!vport || !vport->localport ||
- vport->phba->hba_flag & HBA_PCI_ERR)
+ if (!vport->localport ||
+ test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
+ vport->load_flag & FC_UNLOADING)
return;
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -2541,8 +2551,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* return values is ignored. The upcall is a courtesy to the
* transport.
*/
- if (vport->load_flag & FC_UNLOADING ||
- unlikely(vport->phba->hba_flag & HBA_PCI_ERR))
+ if (vport->load_flag & FC_UNLOADING)
(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
ret = nvme_fc_unregister_remoteport(remoteport);
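
[The lpfc_nvme.c changes all add the same guard: once HBA_IOQ_FLUSH is set (I/O queues are being flushed for error recovery), new LS and FCP submissions are refused with -ENODEV instead of being queued behind a dying port. A hedged sketch of the guard shape; the struct layouts and flag values below are illustrative, not lpfc's:]

    struct my_phba  { unsigned long hba_flag; };
    struct my_vport { unsigned long load_flag; struct my_phba *phba; };

    #define FC_UNLOADING   (1UL << 0)   /* bit values illustrative only */
    #define HBA_IOQ_FLUSH  (1UL << 1)

    /* fail fast at every entry point once teardown or flush has begun */
    static int my_nvme_submit(struct my_vport *vport)
    {
            if (!vport ||
                (vport->load_flag & FC_UNLOADING) ||
                (vport->phba->hba_flag & HBA_IOQ_FLUSH))
                    return -ENODEV; /* do not queue I/O to a dying port */

            /* ... normal LS/FCP submission path ... */
            return 0;
    }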
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3c132604fd91..f6b83853f7ee 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5528,7 +5528,9 @@ static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
{
struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
- return bio ? blkcg_get_fc_appid(bio) : NULL;
+ if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio)
+ return NULL;
+ return blkcg_get_fc_appid(bio);
}
/**
@@ -5929,13 +5931,15 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
}
lpfc_cmd->waitq = &waitq;
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
spin_unlock(&pring_s4->ring_lock);
- else
+ ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
+ lpfc_sli_abort_fcp_cmpl);
+ } else {
pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
-
- ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
- lpfc_sli_abort_fcp_cmpl);
+ ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ lpfc_sli_abort_fcp_cmpl);
+ }
/* Make sure HBA is alive */
lpfc_issue_hb_tmo(phba);
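
[The lpfc_is_command_vm_io() change uses a standard kernel pattern worth noting: IS_ENABLED(CONFIG_...) folds to a compile-time 0 or 1, so the feature check costs nothing at runtime and the disabled branch is removed by the compiler, making the no-appid behaviour (return NULL) explicit when the option is off. The shape, restated as a sketch:]

    #include <linux/blk-cgroup.h>

    /* IS_ENABLED() is a build-time constant; dead branch is eliminated */
    static char *my_get_appid(struct bio *bio)
    {
            if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio)
                    return NULL;
            return blkcg_get_fc_appid(bio);
    }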
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 20d40957a385..6adaf79e67cc 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2828,6 +2828,12 @@ __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
+void
+lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ __lpfc_sli_rpi_release(vport, ndlp);
+}
+
/**
* lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
* @phba: Pointer to HBA context object.
@@ -3715,7 +3721,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
unsigned long iflag;
u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock_irqsave(&pring->ring_lock, iflag);
+ else
+ spin_lock_irqsave(&phba->hbalock, iflag);
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock_irqrestore(&pring->ring_lock, iflag);
+ else
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
ulp_command = get_job_cmnd(phba, saveq);
ulp_status = get_job_ulpstatus(phba, saveq);
@@ -4052,10 +4066,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
break;
}
- spin_unlock_irqrestore(&phba->hbalock, iflag);
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
- spin_lock_irqsave(&phba->hbalock, iflag);
if (unlikely(!cmdiocbq))
break;
if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
@@ -4536,42 +4548,62 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- LIST_HEAD(completions);
+ LIST_HEAD(tx_completions);
+ LIST_HEAD(txcmplq_completions);
struct lpfc_iocbq *iocb, *next_iocb;
+ int offline;
if (pring->ringno == LPFC_ELS_RING) {
lpfc_fabric_abort_hba(phba);
}
+ offline = pci_channel_offline(phba->pcidev);
/* Error everything on txq and txcmplq
* First do the txq.
*/
if (phba->sli_rev >= LPFC_SLI_REV4) {
spin_lock_irq(&pring->ring_lock);
- list_splice_init(&pring->txq, &completions);
+ list_splice_init(&pring->txq, &tx_completions);
pring->txq_cnt = 0;
- spin_unlock_irq(&pring->ring_lock);
- spin_lock_irq(&phba->hbalock);
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
- spin_unlock_irq(&phba->hbalock);
+ if (offline) {
+ list_splice_init(&pring->txcmplq,
+ &txcmplq_completions);
+ } else {
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring,
+ iocb, NULL);
+ }
+ spin_unlock_irq(&pring->ring_lock);
} else {
spin_lock_irq(&phba->hbalock);
- list_splice_init(&pring->txq, &completions);
+ list_splice_init(&pring->txq, &tx_completions);
pring->txq_cnt = 0;
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
+ if (offline) {
+ list_splice_init(&pring->txcmplq, &txcmplq_completions);
+ } else {
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring,
+ iocb, NULL);
+ }
spin_unlock_irq(&phba->hbalock);
}
- /* Make sure HBA is alive */
- lpfc_issue_hb_tmo(phba);
+ if (offline) {
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+ } else {
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+ }
/* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
}
@@ -4624,11 +4656,6 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
struct lpfc_iocbq *piocb, *next_iocb;
spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & HBA_IOQ_FLUSH ||
- !phba->sli4_hba.hdwq) {
- spin_unlock_irq(&phba->hbalock);
- return;
- }
/* Indicate the I/O queues are flushed */
phba->hba_flag |= HBA_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
@@ -10693,10 +10720,10 @@ __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
/* Words 0 - 2 */
bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
- bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
- bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
+ bde->addr_low = bpl->addr_low;
+ bde->addr_high = bpl->addr_high;
bde->type_size = cpu_to_le32(xmit_len);
- bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BLP_64);
+ bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
/* Word 3 */
cmdwqe->gen_req.request_payload_len = xmit_len;
@@ -10997,6 +11024,10 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
unsigned long iflags;
int rc;
+ /* If the PCI channel is in offline state, do not post iocbs. */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return IOCB_ERROR;
+
if (phba->sli_rev == LPFC_SLI_REV4) {
lpfc_sli_prep_wqe(phba, piocb);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e52f37e5d896..a4d3259b8c52 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.0"
+#define LPFC_DRIVER_VERSION "14.2.0.1"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 611871ef15b5..4919ea54b827 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2560,6 +2560,9 @@ struct megasas_instance_template {
#define MEGASAS_IS_LOGICAL(sdev) \
((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+#define MEGASAS_IS_LUN_VALID(sdev) \
+ (((sdev)->lun == 0) ? 1 : 0)
+
#define MEGASAS_DEV_INDEX(scp) \
(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
scp->device->id)
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 8bf72dbc33b7..db6793608447 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2126,6 +2126,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
goto scan_target;
}
return -ENXIO;
+ } else if (!MEGASAS_IS_LUN_VALID(sdev)) {
+ sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+ return -ENXIO;
}
scan_target:
@@ -2156,6 +2159,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev)
instance = megasas_lookup_instance(sdev->host->host_no);
if (MEGASAS_IS_LOGICAL(sdev)) {
+ if (!MEGASAS_IS_LUN_VALID(sdev)) {
+ sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+ return;
+ }
ld_tgt_id = MEGASAS_TARGET_ID(sdev);
instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
if (megasas_dbg_lvl & LD_PD_DEBUG)
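
[The new MEGASAS_IS_LUN_VALID() check encodes the fact that megaraid_sas exposes every logical device at LUN 0 only: slave_alloc now rejects other LUNs with -ENXIO and slave_destroy skips them, so a stray non-zero LUN can no longer be treated as a logical drive. The guard, restated as a hedged sketch:]

    #include <scsi/scsi_device.h>

    /* this driver's logical devices live only at LUN 0 */
    #define MY_IS_LUN_VALID(sdev)   ((sdev)->lun == 0)

    static int my_slave_alloc(struct scsi_device *sdev)
    {
            if (!MY_IS_LUN_VALID(sdev))
                    return -ENXIO;  /* not a real device on this HBA */
            /* ... normal target setup ... */
            return 0;
    }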
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index b57f1803371e..538d2c0cd971 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5716,13 +5716,12 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are
* having same upper 32bits in their base memory address.
- * @reply_pool_start_address: Base address of a reply queue set
+ * @start_address: Base address of a reply queue set
* @pool_sz: Size of single Reply Descriptor Post Queues pool size
*
* Return: 1 if reply queues in a set have a same upper 32bits in their base
* memory address, else 0.
*/
-
static int
mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
{
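
[Besides the kernel-doc fix (the @param name must match the actual parameter, here start_address), the function being touched verifies that a reply-queue pool does not straddle a 4 GiB boundary, i.e. that its first and last bytes share the upper 32 address bits, per the comment quoted in the hunk. One way to express that check, as a sketch rather than the driver's exact code (assumes pool_sz > 0):]

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* true if [start, start + pool_sz) stays within one 4 GiB region */
    static bool my_same_4gb_region(dma_addr_t start, u32 pool_sz)
    {
            return upper_32_bits(start) ==
                   upper_32_bits(start + pool_sz - 1);
    }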
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 0563078227de..a8dd14c91efd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -394,10 +394,13 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
retry_count++;
if (ioc->config_cmds.smid == smid)
mpt3sas_base_free_smid(ioc, smid);
- if ((ioc->shost_recovery) || (ioc->config_cmds.status &
- MPT3_CMD_RESET) || ioc->pci_error_recovery)
+ if (ioc->config_cmds.status & MPT3_CMD_RESET)
goto retry_config;
- issue_host_reset = 1;
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ issue_host_reset = 0;
+ r = -EFAULT;
+ } else
+ issue_host_reset = 1;
goto free_mem;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 00792767c620..7e476f50935b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -11035,6 +11035,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
{
struct _sas_port *mpt3sas_port, *next;
unsigned long flags;
+ int port_id;
/* remove sibling ports attached to this expander */
list_for_each_entry_safe(mpt3sas_port, next,
@@ -11055,6 +11056,8 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_port->hba_port);
}
+ port_id = sas_expander->port->port_id;
+
mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
sas_expander->sas_address_parent, sas_expander->port);
@@ -11062,7 +11065,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
"expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
sas_expander->handle, (unsigned long long)
sas_expander->sas_address,
- sas_expander->port->port_id);
+ port_id);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_del(&sas_expander->list);
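
[The _scsih_expander_node_remove() change is a classic use-after-free fix: mpt3sas_transport_port_remove() can free sas_expander->port, and the old code still dereferenced it afterwards for the log message. Copying the field out first is the general cure; a sketch with hypothetical my_* names:]

    #include <linux/printk.h>

    struct my_port     { int port_id; };
    struct my_expander { struct my_port *port; };
    struct my_ioc;

    void my_transport_port_remove(struct my_ioc *ioc, struct my_expander *ex);

    static void my_remove_expander(struct my_ioc *ioc, struct my_expander *ex)
    {
            int port_id = ex->port->port_id;        /* copy out first */

            my_transport_port_remove(ioc, ex);      /* may free ex->port */

            pr_info("expander removed, port:%d\n", port_id); /* safe copy */
    }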
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 7ac63eb5ccd3..2fde496fff5f 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -647,6 +647,7 @@ static struct pci_device_id mvs_pci_table[] = {
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
+ { PCI_VDEVICE(TTI, 0x2640), chip_6440 },
{ PCI_VDEVICE(TTI, 0x2710), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2720), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2721), chip_9480 },
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index c4a838635893..5d7dfefd6f6c 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -192,10 +192,11 @@ struct sym53c500_data {
int fast_pio;
};
-static struct scsi_pointer *sym53c500_scsi_pointer(struct scsi_cmnd *cmd)
-{
- return scsi_cmd_priv(cmd);
-}
+struct sym53c500_cmd_priv {
+ int status;
+ int message;
+ int phase;
+};
enum Phase {
idle,
@@ -356,7 +357,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct sym53c500_data *data =
(struct sym53c500_data *)dev->hostdata;
struct scsi_cmnd *curSC = data->current_SC;
- struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(curSC);
+ struct sym53c500_cmd_priv *scp = scsi_cmd_priv(curSC);
int fast_pio = data->fast_pio;
spin_lock_irqsave(dev->host_lock, flags);
@@ -403,12 +404,11 @@ SYM53C500_intr(int irq, void *dev_id)
if (int_reg & 0x20) { /* Disconnect */
DEB(printk("SYM53C500: disconnect intr received\n"));
- if (scsi_pointer->phase != message_in) { /* Unexpected disconnect */
+ if (scp->phase != message_in) { /* Unexpected disconnect */
curSC->result = DID_NO_CONNECT << 16;
} else { /* Command complete, return status and message */
- curSC->result = (scsi_pointer->Status & 0xff) |
- ((scsi_pointer->Message & 0xff) << 8) |
- (DID_OK << 16);
+ curSC->result = (scp->status & 0xff) |
+ ((scp->message & 0xff) << 8) | (DID_OK << 16);
}
goto idle_out;
}
@@ -419,7 +419,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct scatterlist *sg;
int i;
- scsi_pointer->phase = data_out;
+ scp->phase = data_out;
VDEB(printk("SYM53C500: Data-Out phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
@@ -438,7 +438,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct scatterlist *sg;
int i;
- scsi_pointer->phase = data_in;
+ scp->phase = data_in;
VDEB(printk("SYM53C500: Data-In phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
@@ -453,12 +453,12 @@ SYM53C500_intr(int irq, void *dev_id)
break;
case 0x02: /* COMMAND */
- scsi_pointer->phase = command_ph;
+ scp->phase = command_ph;
printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n");
break;
case 0x03: /* STATUS */
- scsi_pointer->phase = status_ph;
+ scp->phase = status_ph;
VDEB(printk("SYM53C500: Status phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
outb(INIT_CMD_COMPLETE, port_base + CMD_REG);
@@ -471,24 +471,22 @@ SYM53C500_intr(int irq, void *dev_id)
case 0x06: /* MESSAGE-OUT */
DEB(printk("SYM53C500: Message-Out phase\n"));
- scsi_pointer->phase = message_out;
+ scp->phase = message_out;
outb(SET_ATN, port_base + CMD_REG); /* Reject the message */
outb(MSG_ACCEPT, port_base + CMD_REG);
break;
case 0x07: /* MESSAGE-IN */
VDEB(printk("SYM53C500: Message-In phase\n"));
- scsi_pointer->phase = message_in;
+ scp->phase = message_in;
- scsi_pointer->Status = inb(port_base + SCSI_FIFO);
- scsi_pointer->Message = inb(port_base + SCSI_FIFO);
+ scp->status = inb(port_base + SCSI_FIFO);
+ scp->message = inb(port_base + SCSI_FIFO);
VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f));
- DEB(printk("Status = %02x Message = %02x\n",
- scsi_pointer->Status, scsi_pointer->Message));
+ DEB(printk("Status = %02x Message = %02x\n", scp->status, scp->message));
- if (scsi_pointer->Message == SAVE_POINTERS ||
- scsi_pointer->Message == DISCONNECT) {
+ if (scp->message == SAVE_POINTERS || scp->message == DISCONNECT) {
outb(SET_ATN, port_base + CMD_REG); /* Reject message */
DEB(printk("Discarding SAVE_POINTERS message\n"));
}
@@ -500,7 +498,7 @@ out:
return IRQ_HANDLED;
idle_out:
- scsi_pointer->phase = idle;
+ scp->phase = idle;
scsi_done(curSC);
goto out;
}
@@ -548,7 +546,7 @@ SYM53C500_info(struct Scsi_Host *SChost)
static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
{
- struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(SCpnt);
+ struct sym53c500_cmd_priv *scp = scsi_cmd_priv(SCpnt);
int i;
int port_base = SCpnt->device->host->io_port;
struct sym53c500_data *data =
@@ -565,9 +563,9 @@ static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
VDEB(printk("\n"));
data->current_SC = SCpnt;
- scsi_pointer->phase = command_ph;
- scsi_pointer->Status = 0;
- scsi_pointer->Message = 0;
+ scp->phase = command_ph;
+ scp->status = 0;
+ scp->message = 0;
/* We are locked here already by the mid layer */
REG0(port_base);
@@ -682,7 +680,7 @@ static struct scsi_host_template sym53c500_driver_template = {
.this_id = 7,
.sg_tablesize = 32,
.shost_groups = SYM53C500_shost_groups,
- .cmd_size = sizeof(struct scsi_pointer),
+ .cmd_size = sizeof(struct sym53c500_cmd_priv),
};
static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data)
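
[The sym53c500_cs conversion replaces the legacy, mid-layer-owned scsi_pointer (with its capitalized Status/Message fields) by a driver-private per-command struct: the template's .cmd_size tells the SCSI mid-layer how much extra space to allocate alongside each scsi_cmnd, and scsi_cmd_priv() returns a pointer to it. A condensed sketch of the pattern; names other than the SCSI API are illustrative:]

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    struct my_cmd_priv {            /* private per-command state */
            int status;
            int message;
            int phase;
    };

    static int my_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
    {
            struct my_cmd_priv *p = scsi_cmd_priv(cmd);

            p->phase   = 0;         /* driver initializes its own state */
            p->status  = 0;
            p->message = 0;
            /* ... start the command ... */
            return 0;
    }

    static struct scsi_host_template my_tmpl = {
            .name     = "my_hba",
            /* mid-layer allocates this much extra space per command */
            .cmd_size = sizeof(struct my_cmd_priv),
    };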
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index f90b707c190b..01c5e8ff4cc5 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -766,6 +766,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01;
+ /* Enable higher IQs and OQs, 32 to 63, bit 16 */
+ if (pm8001_ha->max_q_num > 32)
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+ 1 << 16;
/* Disable end to end CRC checking */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
@@ -1027,6 +1031,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
if (0x0000 != gst_len_mpistate)
return -EBUSY;
+ /*
+ * As per controller datasheet, after successful MPI
+ * initialization minimum 500ms delay is required before
+ * issuing commands.
+ */
+ msleep(500);
+
return 0;
}
@@ -1727,10 +1738,11 @@ static void
pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
- u32 mask;
- mask = (u32)(1 << vec);
-
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+ if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
+ else
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
+ 1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_enable(pm8001_ha);
@@ -1746,12 +1758,15 @@ static void
pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
- u32 mask;
- if (vec == 0xFF)
- mask = 0xFFFFFFFF;
+ if (vec == 0xFF) {
+ /* disable all vectors 0-31, 32-63 */
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
+ } else if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
else
- mask = (u32)(1 << vec);
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
+ 1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_disable(pm8001_ha);
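
[The pm80xx changes account for controllers with more than 32 MSI-X vectors: shifting a 32-bit value by 32 or more is undefined behaviour in C, and the hardware keeps the masks for vectors 32-63 in separate upper registers (MSGU_ODMR_U / MSGU_ODMR_CLR_U in the diff) in any case. The 500 ms msleep() after MPI init follows the controller datasheet as quoted in the new comment. The vector split, sketched with a hypothetical register-write helper and illustrative offsets:]

    #define MY_ODMR    0x3014       /* register offsets illustrative only */
    #define MY_ODMR_U  0x3018

    struct my_hba;
    void my_write_reg(struct my_hba *hba, u32 off, u32 val);

    /* mask one of 64 vectors spread across two 32-bit registers */
    static void my_mask_vec(struct my_hba *hba, u8 vec)
    {
            if (vec < 32)
                    my_write_reg(hba, MY_ODMR,   1U << vec);
            else
                    my_write_reg(hba, MY_ODMR_U, 1U << (vec - 32));
    }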
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 928532180d32..fd674ed1febe 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3182,124 +3182,6 @@ static int pmcraid_build_ioadl(
}
/**
- * pmcraid_free_sglist - Frees an allocated SG buffer list
- * @sglist: scatter/gather list pointer
- *
- * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist
- *
- * Return value:
- * none
- */
-static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
-{
- sgl_free_order(sglist->scatterlist, sglist->order);
- kfree(sglist);
-}
-
-/**
- * pmcraid_alloc_sglist - Allocates memory for a SG list
- * @buflen: buffer length
- *
- * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
- * list.
- *
- * Return value
- * pointer to sglist / NULL on failure
- */
-static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
-{
- struct pmcraid_sglist *sglist;
- int sg_size;
- int order;
-
- sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
- order = (sg_size > 0) ? get_order(sg_size) : 0;
-
- /* Allocate a scatter/gather list for the DMA */
- sglist = kzalloc(sizeof(struct pmcraid_sglist), GFP_KERNEL);
- if (sglist == NULL)
- return NULL;
-
- sglist->order = order;
- sgl_alloc_order(buflen, order, false, GFP_KERNEL | __GFP_ZERO,
- &sglist->num_sg);
-
- return sglist;
-}
-
-/**
- * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
- * @sglist: scatter/gather list pointer
- * @buffer: buffer pointer
- * @len: buffer length
- * @direction: data transfer direction
- *
- * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist
- *
- * Return value:
- * 0 on success / other on failure
- */
-static int pmcraid_copy_sglist(
- struct pmcraid_sglist *sglist,
- void __user *buffer,
- u32 len,
- int direction
-)
-{
- struct scatterlist *sg;
- void *kaddr;
- int bsize_elem;
- int i;
- int rc = 0;
-
- /* Determine the actual number of bytes per element */
- bsize_elem = PAGE_SIZE * (1 << sglist->order);
-
- sg = sglist->scatterlist;
-
- for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) {
- struct page *page = sg_page(sg);
-
- kaddr = kmap(page);
- if (direction == DMA_TO_DEVICE)
- rc = copy_from_user(kaddr, buffer, bsize_elem);
- else
- rc = copy_to_user(buffer, kaddr, bsize_elem);
-
- kunmap(page);
-
- if (rc) {
- pmcraid_err("failed to copy user data into sg list\n");
- return -EFAULT;
- }
-
- sg->length = bsize_elem;
- }
-
- if (len % bsize_elem) {
- struct page *page = sg_page(sg);
-
- kaddr = kmap(page);
-
- if (direction == DMA_TO_DEVICE)
- rc = copy_from_user(kaddr, buffer, len % bsize_elem);
- else
- rc = copy_to_user(buffer, kaddr, len % bsize_elem);
-
- kunmap(page);
-
- sg->length = len % bsize_elem;
- }
-
- if (rc) {
- pmcraid_err("failed to copy user data into sg list\n");
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-/**
* pmcraid_queuecommand_lck - Queue a mid-layer request
* @scsi_cmd: scsi command struct
*
@@ -3454,365 +3336,6 @@ static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
return rc;
}
-
-/**
- * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
- * commands sent over IOCTL interface
- *
- * @cmd : pointer to struct pmcraid_cmd
- * @buflen : length of the request buffer
- * @direction : data transfer direction
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static int pmcraid_build_passthrough_ioadls(
- struct pmcraid_cmd *cmd,
- int buflen,
- int direction
-)
-{
- struct pmcraid_sglist *sglist = NULL;
- struct scatterlist *sg = NULL;
- struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
- struct pmcraid_ioadl_desc *ioadl;
- int i;
-
- sglist = pmcraid_alloc_sglist(buflen);
-
- if (!sglist) {
- pmcraid_err("can't allocate memory for passthrough SGls\n");
- return -ENOMEM;
- }
-
- sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev,
- sglist->scatterlist,
- sglist->num_sg, direction);
-
- if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
- dev_err(&cmd->drv_inst->pdev->dev,
- "Failed to map passthrough buffer!\n");
- pmcraid_free_sglist(sglist);
- return -EIO;
- }
-
- cmd->sglist = sglist;
- ioarcb->request_flags0 |= NO_LINK_DESCS;
-
- ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
-
- /* Initialize IOADL descriptor addresses */
- for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
- ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
- ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
- ioadl[i].flags = 0;
- }
-
- /* setup the last descriptor */
- ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
-
- return 0;
-}
-
-
-/**
- * pmcraid_release_passthrough_ioadls - release passthrough ioadls
- *
- * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
- * @buflen: size of the request buffer
- * @direction: data transfer direction
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static void pmcraid_release_passthrough_ioadls(
- struct pmcraid_cmd *cmd,
- int buflen,
- int direction
-)
-{
- struct pmcraid_sglist *sglist = cmd->sglist;
-
- if (buflen > 0) {
- dma_unmap_sg(&cmd->drv_inst->pdev->dev,
- sglist->scatterlist,
- sglist->num_sg,
- direction);
- pmcraid_free_sglist(sglist);
- cmd->sglist = NULL;
- }
-}
-
-/**
- * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
- *
- * @pinstance: pointer to adapter instance structure
- * @ioctl_cmd: ioctl code
- * @buflen: unused
- * @arg: pointer to pmcraid_passthrough_buffer user buffer
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static long pmcraid_ioctl_passthrough(
- struct pmcraid_instance *pinstance,
- unsigned int ioctl_cmd,
- unsigned int buflen,
- void __user *arg
-)
-{
- struct pmcraid_passthrough_ioctl_buffer *buffer;
- struct pmcraid_ioarcb *ioarcb;
- struct pmcraid_cmd *cmd;
- struct pmcraid_cmd *cancel_cmd;
- void __user *request_buffer;
- unsigned long request_offset;
- unsigned long lock_flags;
- void __user *ioasa;
- u32 ioasc;
- int request_size;
- int buffer_size;
- u8 direction;
- int rc = 0;
-
- /* If IOA reset is in progress, wait 10 secs for reset to complete */
- if (pinstance->ioa_reset_in_progress) {
- rc = wait_event_interruptible_timeout(
- pinstance->reset_wait_q,
- !pinstance->ioa_reset_in_progress,
- msecs_to_jiffies(10000));
-
- if (!rc)
- return -ETIMEDOUT;
- else if (rc < 0)
- return -ERESTARTSYS;
- }
-
- /* If adapter is not in operational state, return error */
- if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
- pmcraid_err("IOA is not operational\n");
- return -ENOTTY;
- }
-
- buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
- buffer = kmalloc(buffer_size, GFP_KERNEL);
-
- if (!buffer) {
- pmcraid_err("no memory for passthrough buffer\n");
- return -ENOMEM;
- }
-
- request_offset =
- offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
-
- request_buffer = arg + request_offset;
-
- rc = copy_from_user(buffer, arg,
- sizeof(struct pmcraid_passthrough_ioctl_buffer));
-
- ioasa = arg + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa);
-
- if (rc) {
- pmcraid_err("ioctl: can't copy passthrough buffer\n");
- rc = -EFAULT;
- goto out_free_buffer;
- }
-
- request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length);
-
- if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
- direction = DMA_TO_DEVICE;
- } else {
- direction = DMA_FROM_DEVICE;
- }
-
- if (request_size < 0) {
- rc = -EINVAL;
- goto out_free_buffer;
- }
-
- /* check if we have any additional command parameters */
- if (le16_to_cpu(buffer->ioarcb.add_cmd_param_length)
- > PMCRAID_ADD_CMD_PARAM_LEN) {
- rc = -EINVAL;
- goto out_free_buffer;
- }
-
- cmd = pmcraid_get_free_cmd(pinstance);
-
- if (!cmd) {
- pmcraid_err("free command block is not available\n");
- rc = -ENOMEM;
- goto out_free_buffer;
- }
-
- cmd->scsi_cmd = NULL;
- ioarcb = &(cmd->ioa_cb->ioarcb);
-
- /* Copy the user-provided IOARCB stuff field by field */
- ioarcb->resource_handle = buffer->ioarcb.resource_handle;
- ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
- ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
- ioarcb->request_type = buffer->ioarcb.request_type;
- ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
- ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
- memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
-
- if (buffer->ioarcb.add_cmd_param_length) {
- ioarcb->add_cmd_param_length =
- buffer->ioarcb.add_cmd_param_length;
- ioarcb->add_cmd_param_offset =
- buffer->ioarcb.add_cmd_param_offset;
- memcpy(ioarcb->add_data.u.add_cmd_params,
- buffer->ioarcb.add_data.u.add_cmd_params,
- le16_to_cpu(buffer->ioarcb.add_cmd_param_length));
- }
-
- /* set hrrq number where the IOA should respond to. Note that all cmds
- * generated internally uses hrrq_id 0, exception to this is the cmd
- * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
- * hrrq_id assigned here in queuecommand
- */
- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
- pinstance->num_hrrq;
-
- if (request_size) {
- rc = pmcraid_build_passthrough_ioadls(cmd,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("couldn't build passthrough ioadls\n");
- goto out_free_cmd;
- }
- }
-
- /* If data is being written into the device, copy the data from user
- * buffers
- */
- if (direction == DMA_TO_DEVICE && request_size > 0) {
- rc = pmcraid_copy_sglist(cmd->sglist,
- request_buffer,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("failed to copy user buffer\n");
- goto out_free_sglist;
- }
- }
-
- /* passthrough ioctl is a blocking command so, put the user to sleep
- * until timeout. Note that a timeout value of 0 means, do timeout.
- */
- cmd->cmd_done = pmcraid_internal_done;
- init_completion(&cmd->wait_for_completion);
- cmd->completion_req = 1;
-
- pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
- le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
- cmd->ioa_cb->ioarcb.cdb[0],
- le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
-
- spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
- _pmcraid_fire_command(cmd);
- spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
-
- /* NOTE ! Remove the below line once abort_task is implemented
- * in firmware. This line disables ioctl command timeout handling logic
- * similar to IO command timeout handling, making ioctl commands to wait
- * until the command completion regardless of timeout value specified in
- * ioarcb
- */
- buffer->ioarcb.cmd_timeout = 0;
-
- /* If command timeout is specified put caller to wait till that time,
- * otherwise it would be blocking wait. If command gets timed out, it
- * will be aborted.
- */
- if (buffer->ioarcb.cmd_timeout == 0) {
- wait_for_completion(&cmd->wait_for_completion);
- } else if (!wait_for_completion_timeout(
- &cmd->wait_for_completion,
- msecs_to_jiffies(le16_to_cpu(buffer->ioarcb.cmd_timeout) * 1000))) {
-
- pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
- le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
- cmd->ioa_cb->ioarcb.cdb[0]);
-
- spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
- cancel_cmd = pmcraid_abort_cmd(cmd);
- spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
-
- if (cancel_cmd) {
- wait_for_completion(&cancel_cmd->wait_for_completion);
- ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
- pmcraid_return_cmd(cancel_cmd);
-
- /* if abort task couldn't find the command i.e it got
- * completed prior to aborting, return good completion.
- * if command got aborted successfully or there was IOA
- * reset due to abort task itself getting timedout then
- * return -ETIMEDOUT
- */
- if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
- PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
- if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
- rc = -ETIMEDOUT;
- goto out_handle_response;
- }
- }
-
- /* no command block for abort task or abort task failed to abort
- * the IOARCB, then wait for 150 more seconds and initiate reset
- * sequence after timeout
- */
- if (!wait_for_completion_timeout(
- &cmd->wait_for_completion,
- msecs_to_jiffies(150 * 1000))) {
- pmcraid_reset_bringup(cmd->drv_inst);
- rc = -ETIMEDOUT;
- }
- }
-
-out_handle_response:
- /* copy entire IOASA buffer and return IOCTL success.
- * If copying IOASA to user-buffer fails, return
- * EFAULT
- */
- if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
- sizeof(struct pmcraid_ioasa))) {
- pmcraid_err("failed to copy ioasa buffer to user\n");
- rc = -EFAULT;
- }
-
- /* If the data transfer was from device, copy the data onto user
- * buffers
- */
- else if (direction == DMA_FROM_DEVICE && request_size > 0) {
- rc = pmcraid_copy_sglist(cmd->sglist,
- request_buffer,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("failed to copy user buffer\n");
- rc = -EFAULT;
- }
- }
-
-out_free_sglist:
- pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
-
-out_free_cmd:
- pmcraid_return_cmd(cmd);
-
-out_free_buffer:
- kfree(buffer);
-
- return rc;
-}
-
-
-
-
/**
* pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
*
@@ -3922,20 +3445,6 @@ static long pmcraid_chr_ioctl(
switch (_IOC_TYPE(cmd)) {
- case PMCRAID_PASSTHROUGH_IOCTL:
- /* If ioctl code is to download microcode, we need to block
- * mid-layer requests.
- */
- if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
- scsi_block_requests(pinstance->host);
-
- retval = pmcraid_ioctl_passthrough(pinstance, cmd,
- hdr->buffer_length, argp);
-
- if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
- scsi_unblock_requests(pinstance->host);
- break;
-
case PMCRAID_DRIVER_IOCTL:
arg += sizeof(struct pmcraid_ioctl_header);
retval = pmcraid_ioctl_driver(pinstance, cmd,
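
[Taken together, the pmcraid hunks delete the entire firmware-passthrough ioctl path: the sglist allocate/copy helpers, the ioadl builders, pmcraid_ioctl_passthrough() itself, and its dispatch case, several hundred lines in total. After this, pmcraid_chr_ioctl() only recognizes the driver-handled 'D' type, and a passthrough ('F') request falls through to the existing unknown-ioctl handling. A fragment sketching the slimmed dispatch; the default branch is an assumption, not quoted from the driver:]

    switch (_IOC_TYPE(cmd)) {
    case PMCRAID_DRIVER_IOCTL:
            arg += sizeof(struct pmcraid_ioctl_header);
            retval = pmcraid_ioctl_driver(pinstance, cmd,
                                          hdr->buffer_length, argp);
            break;
    default:
            retval = -ENOTTY;       /* assumed unknown-type handling */
            break;
    }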
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index bbb75318f1e7..9f59930e8b4f 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -1023,40 +1023,15 @@ struct pmcraid_ioctl_header {
#define PMCRAID_IOCTL_SIGNATURE "PMCRAID"
/*
- * pmcraid_passthrough_ioctl_buffer - structure given as argument to
- * passthrough(or firmware handled) IOCTL commands. Note that ioarcb requires
- * 32-byte alignment so, it is necessary to pack this structure to avoid any
- * holes between ioctl_header and passthrough buffer
- *
- * .ioactl_header : ioctl header
- * .ioarcb : filled-up ioarcb buffer, driver always reads this buffer
- * .ioasa : buffer for ioasa, driver fills this with IOASA from firmware
- * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on
- * the transfer directions passed in ioarcb.flags0. Contents
- * of this buffer are valid only when ioarcb.data_transfer_len
- * is not zero.
- */
-struct pmcraid_passthrough_ioctl_buffer {
- struct pmcraid_ioctl_header ioctl_header;
- struct pmcraid_ioarcb ioarcb;
- struct pmcraid_ioasa ioasa;
- u8 request_buffer[];
-} __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
-
-/*
* keys to differentiate between driver handled IOCTLs and passthrough
* IOCTLs passed to IOA. driver determines the ioctl type using macro
* _IOC_TYPE
*/
#define PMCRAID_DRIVER_IOCTL 'D'
-#define PMCRAID_PASSTHROUGH_IOCTL 'F'
#define DRV_IOCTL(n, size) \
_IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
-#define FMW_IOCTL(n, size) \
- _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size))
-
/*
* _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
* This is to facilitate applications avoiding un-necessary memory allocations.
@@ -1069,12 +1044,4 @@ struct pmcraid_passthrough_ioctl_buffer {
#define PMCRAID_IOCTL_RESET_ADAPTER \
DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header))
-/* passthrough/firmware handled commands */
-#define PMCRAID_IOCTL_PASSTHROUGH_COMMAND \
- FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer))
-
-#define PMCRAID_IOCTL_DOWNLOAD_MICROCODE \
- FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer))
-
-
#endif /* _PMCRAID_H */
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 8196f89f404e..31ec429104e2 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -860,6 +860,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
return qedi_iscsi_send_ioreq(task);
}
+static void qedi_offload_work(struct work_struct *work)
+{
+ struct qedi_endpoint *qedi_ep =
+ container_of(work, struct qedi_endpoint, offload_work);
+ struct qedi_ctx *qedi;
+ int wait_delay = 5 * HZ;
+ int ret;
+
+ qedi = qedi_ep->qedi;
+
+ ret = qedi_iscsi_offload_conn(qedi_ep);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+ qedi_ep->iscsi_cid, qedi_ep, ret);
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ return;
+ }
+
+ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state ==
+ EP_STATE_OFLDCONN_COMPL),
+ wait_delay);
+ if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+ qedi_ep->iscsi_cid, qedi_ep);
+ }
+}
+
static struct iscsi_endpoint *
qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
@@ -908,6 +939,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
}
qedi_ep = ep->dd_data;
memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
qedi_ep->state = EP_STATE_IDLE;
qedi_ep->iscsi_cid = (u32)-1;
qedi_ep->qedi = qedi;
@@ -1056,12 +1088,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
qedi_ep = ep->dd_data;
qedi = qedi_ep->qedi;
+ flush_work(&qedi_ep->offload_work);
+
if (qedi_ep->state == EP_STATE_OFLDCONN_START)
goto ep_exit_recover;
- if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
- flush_work(&qedi_ep->offload_work);
-
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
abrt_conn = qedi_conn->abrt_conn;
@@ -1235,37 +1266,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
return rc;
}
-static void qedi_offload_work(struct work_struct *work)
-{
- struct qedi_endpoint *qedi_ep =
- container_of(work, struct qedi_endpoint, offload_work);
- struct qedi_ctx *qedi;
- int wait_delay = 5 * HZ;
- int ret;
-
- qedi = qedi_ep->qedi;
-
- ret = qedi_iscsi_offload_conn(qedi_ep);
- if (ret) {
- QEDI_ERR(&qedi->dbg_ctx,
- "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
- qedi_ep->iscsi_cid, qedi_ep, ret);
- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
- return;
- }
-
- ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
- (qedi_ep->state ==
- EP_STATE_OFLDCONN_COMPL),
- wait_delay);
- if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
- QEDI_ERR(&qedi->dbg_ctx,
- "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
- qedi_ep->iscsi_cid, qedi_ep);
- }
-}
-
static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
{
struct qedi_ctx *qedi;
@@ -1381,7 +1381,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
qedi_ep->dst_addr, qedi_ep->dst_port);
}
- INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
queue_work(qedi->offload_thread, &qedi_ep->offload_work);
ret = 0;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 85dbf81f3204..6dfcfd8e7337 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3826,6 +3826,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&cmd->cmd_lock, flags);
if (cmd->aborted) {
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
/*
* It's normal to see 2 calls in this path:
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index c607755cce00..592a290e6cfa 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -32,7 +32,6 @@
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
@@ -732,9 +731,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static atomic_t sdebug_num_hosts;
-static DEFINE_MUTEX(add_host_mutex);
-
+static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
@@ -781,7 +778,6 @@ static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
-static bool sdebug_deflect_incoming;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
@@ -5122,10 +5118,6 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
- if (smp_load_acquire(&sdebug_deflect_incoming)) {
- pr_info("Exit early due to deflect_incoming\n");
- return 1;
- }
if (devip == NULL) {
devip = find_build_dev_info(sdp);
if (devip == NULL)
@@ -5211,7 +5203,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
}
/* Deletes (stops) timers or work queues of all queued commands */
-static void stop_all_queued(bool done_with_no_conn)
+static void stop_all_queued(void)
{
unsigned long iflags;
int j, k;
@@ -5220,15 +5212,13 @@ static void stop_all_queued(bool done_with_no_conn)
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
struct sdebug_defer *sd_dp;
- struct scsi_cmnd *scp;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
spin_lock_irqsave(&sqp->qc_lock, iflags);
for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
if (test_bit(k, sqp->in_use_bm)) {
sqcp = &sqp->qc_arr[k];
- scp = sqcp->a_cmnd;
- if (!scp)
+ if (sqcp->a_cmnd == NULL)
continue;
devip = (struct sdebug_dev_info *)
sqcp->a_cmnd->device->hostdata;
@@ -5243,10 +5233,6 @@ static void stop_all_queued(bool done_with_no_conn)
l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
stop_qc_helper(sd_dp, l_defer_t);
- if (done_with_no_conn && l_defer_t != SDEB_DEFER_NONE) {
- scp->result = DID_NO_CONNECT << 16;
- scsi_done(scp);
- }
clear_bit(k, sqp->in_use_bm);
spin_lock_irqsave(&sqp->qc_lock, iflags);
}
@@ -5389,7 +5375,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
}
}
spin_unlock(&sdebug_host_list_lock);
- stop_all_queued(false);
+ stop_all_queued();
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
"%s: %d device(s) found\n", __func__, k);
@@ -5449,50 +5435,13 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
}
}
-static void sdeb_block_all_queues(void)
-{
- int j;
- struct sdebug_queue *sqp;
-
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
- atomic_set(&sqp->blocked, (int)true);
-}
-
-static void sdeb_unblock_all_queues(void)
+static void block_unblock_all_queues(bool block)
{
int j;
struct sdebug_queue *sqp;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
- atomic_set(&sqp->blocked, (int)false);
-}
-
-static void
-sdeb_add_n_hosts(int num_hosts)
-{
- if (num_hosts < 1)
- return;
- do {
- bool found;
- unsigned long idx;
- struct sdeb_store_info *sip;
- bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
-
- found = false;
- if (want_phs) {
- xa_for_each_marked(per_store_ap, idx, sip, SDEB_XA_NOT_IN_USE) {
- sdeb_most_recent_idx = (int)idx;
- found = true;
- break;
- }
- if (found) /* re-use case */
- sdebug_add_host_helper((int)idx);
- else
- sdebug_do_add_host(true /* make new store */);
- } else {
- sdebug_do_add_host(false);
- }
- } while (--num_hosts);
+ atomic_set(&sqp->blocked, (int)block);
}
/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
@@ -5505,10 +5454,10 @@ static void tweak_cmnd_count(void)
modulo = abs(sdebug_every_nth);
if (modulo < 2)
return;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
count = atomic_read(&sdebug_cmnd_count);
atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
}
static void clear_queue_stats(void)
@@ -5526,15 +5475,6 @@ static bool inject_on_this_cmd(void)
return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
}
-static int process_deflect_incoming(struct scsi_cmnd *scp)
-{
- u8 opcode = scp->cmnd[0];
-
- if (opcode == SYNCHRONIZE_CACHE || opcode == SYNCHRONIZE_CACHE_16)
- return 0;
- return DID_NO_CONNECT << 16;
-}
-
#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
/* Complete the processing of the thread that queued a SCSI command to this
@@ -5544,7 +5484,8 @@ static int process_deflect_incoming(struct scsi_cmnd *scp)
*/
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
int scsi_result,
- int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *),
+ int (*pfp)(struct scsi_cmnd *,
+ struct sdebug_dev_info *),
int delta_jiff, int ndelay)
{
bool new_sd_dp;
@@ -5565,27 +5506,13 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
sdp = cmnd->device;
- if (delta_jiff == 0) {
- sqp = get_queue(cmnd);
- if (atomic_read(&sqp->blocked)) {
- if (smp_load_acquire(&sdebug_deflect_incoming))
- return process_deflect_incoming(cmnd);
- else
- return SCSI_MLQUEUE_HOST_BUSY;
- }
+ if (delta_jiff == 0)
goto respond_in_thread;
- }
sqp = get_queue(cmnd);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (unlikely(atomic_read(&sqp->blocked))) {
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- if (smp_load_acquire(&sdebug_deflect_incoming)) {
- scsi_result = process_deflect_incoming(cmnd);
- goto respond_in_thread;
- }
- if (sdebug_verbose)
- pr_info("blocked --> SCSI_MLQUEUE_HOST_BUSY\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
num_in_q = atomic_read(&devip->num_in_q);
@@ -5774,12 +5701,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
respond_in_thread: /* call back to mid-layer using invocation thread */
cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
cmnd->result &= ~SDEG_RES_IMMED_MASK;
- if (cmnd->result == 0 && scsi_result != 0) {
+ if (cmnd->result == 0 && scsi_result != 0)
cmnd->result = scsi_result;
- if (sdebug_verbose)
- pr_info("respond_in_thread: tag=0x%x, scp->result=0x%x\n",
- blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)), scsi_result);
- }
scsi_done(cmnd);
return 0;
}
@@ -6064,7 +5987,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
int j, k;
struct sdebug_queue *sqp;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
k = find_first_bit(sqp->in_use_bm,
@@ -6078,7 +6001,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
sdebug_jdelay = jdelay;
sdebug_ndelay = 0;
}
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
}
return res;
}
@@ -6104,7 +6027,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
int j, k;
struct sdebug_queue *sqp;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
k = find_first_bit(sqp->in_use_bm,
@@ -6119,7 +6042,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
: DEF_JDELAY;
}
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
}
return res;
}
@@ -6433,7 +6356,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
(n <= SDEBUG_CANQUEUE) &&
(sdebug_host_max_queue == 0)) {
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
k = 0;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
@@ -6448,7 +6371,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
atomic_set(&retired_max_queue, k + 1);
else
atomic_set(&retired_max_queue, 0);
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
return count;
}
return -EINVAL;
@@ -6537,48 +6460,43 @@ static DRIVER_ATTR_RW(virtual_gb);
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
/* absolute number of hosts currently active is what is shown */
- return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&sdebug_num_hosts));
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}
-/*
- * Accept positive and negative values. Hex values (only positive) may be prefixed by '0x'.
- * To remove all hosts use a large negative number (e.g. -9999). The value 0 does nothing.
- * Returns -EBUSY if another add_host sysfs invocation is active.
- */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
size_t count)
{
+ bool found;
+ unsigned long idx;
+ struct sdeb_store_info *sip;
+ bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
int delta_hosts;
- if (count == 0 || kstrtoint(buf, 0, &delta_hosts))
+ if (sscanf(buf, "%d", &delta_hosts) != 1)
return -EINVAL;
- if (sdebug_verbose)
- pr_info("prior num_hosts=%d, num_to_add=%d\n",
- atomic_read(&sdebug_num_hosts), delta_hosts);
- if (delta_hosts == 0)
- return count;
- if (mutex_trylock(&add_host_mutex) == 0)
- return -EBUSY;
if (delta_hosts > 0) {
- sdeb_add_n_hosts(delta_hosts);
- } else if (delta_hosts < 0) {
- smp_store_release(&sdebug_deflect_incoming, true);
- sdeb_block_all_queues();
- if (delta_hosts >= atomic_read(&sdebug_num_hosts))
- stop_all_queued(true);
do {
- if (atomic_read(&sdebug_num_hosts) < 1) {
- free_all_queued();
- break;
+ found = false;
+ if (want_phs) {
+ xa_for_each_marked(per_store_ap, idx, sip,
+ SDEB_XA_NOT_IN_USE) {
+ sdeb_most_recent_idx = (int)idx;
+ found = true;
+ break;
+ }
+ if (found) /* re-use case */
+ sdebug_add_host_helper((int)idx);
+ else
+ sdebug_do_add_host(true);
+ } else {
+ sdebug_do_add_host(false);
}
+ } while (--delta_hosts);
+ } else if (delta_hosts < 0) {
+ do {
sdebug_do_remove_host(false);
} while (++delta_hosts);
- sdeb_unblock_all_queues();
- smp_store_release(&sdebug_deflect_incoming, false);
}
- mutex_unlock(&add_host_mutex);
- if (sdebug_verbose)
- pr_info("post num_hosts=%d\n", atomic_read(&sdebug_num_hosts));
return count;
}
static DRIVER_ATTR_RW(add_host);
@@ -7089,10 +7007,6 @@ static int __init scsi_debug_init(void)
sdebug_add_host = 0;
for (k = 0; k < hosts_to_add; k++) {
- if (smp_load_acquire(&sdebug_deflect_incoming)) {
- pr_info("exit early as sdebug_deflect_incoming is set\n");
- return 0;
- }
if (want_store && k == 0) {
ret = sdebug_add_host_helper(idx);
if (ret < 0) {
@@ -7110,12 +7024,8 @@ static int __init scsi_debug_init(void)
}
}
if (sdebug_verbose)
- pr_info("built %d host(s)\n", atomic_read(&sdebug_num_hosts));
+ pr_info("built %d host(s)\n", sdebug_num_hosts);
- /*
- * Even though all the hosts have been established, due to async device (LU) scanning
- * by the scsi mid-level, there may still be devices (LUs) being set up.
- */
return 0;
bus_unreg:
@@ -7131,17 +7041,12 @@ free_q_arr:
static void __exit scsi_debug_exit(void)
{
- int k;
+ int k = sdebug_num_hosts;
- /* Possible race with LUs still being set up; stop them asap */
- sdeb_block_all_queues();
- smp_store_release(&sdebug_deflect_incoming, true);
- stop_all_queued(false);
- for (k = 0; atomic_read(&sdebug_num_hosts) > 0; k++)
+ stop_all_queued();
+ for (; k; k--)
sdebug_do_remove_host(true);
free_all_queued();
- if (sdebug_verbose)
- pr_info("removed %d hosts\n", k);
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
@@ -7311,13 +7216,13 @@ static int sdebug_add_host_helper(int per_host_idx)
sdbg_host->dev.bus = &pseudo_lld_bus;
sdbg_host->dev.parent = pseudo_primary;
sdbg_host->dev.release = &sdebug_release_adapter;
- dev_set_name(&sdbg_host->dev, "adapter%d", atomic_read(&sdebug_num_hosts));
+ dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
error = device_register(&sdbg_host->dev);
if (error)
goto clean;
- atomic_inc(&sdebug_num_hosts);
+ ++sdebug_num_hosts;
return 0;
clean:
@@ -7381,7 +7286,7 @@ static void sdebug_do_remove_host(bool the_end)
return;
device_unregister(&sdbg_host->dev);
- atomic_dec(&sdebug_num_hosts);
+ --sdebug_num_hosts;
}
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
@@ -7389,10 +7294,10 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
int num_in_q = 0;
struct sdebug_dev_info *devip;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
devip = (struct sdebug_dev_info *)sdev->hostdata;
if (NULL == devip) {
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
return -ENODEV;
}
num_in_q = atomic_read(&devip->num_in_q);
@@ -7411,7 +7316,7 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
__func__, qdepth, num_in_q);
}
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
return sdev->queue_depth;
}
@@ -7519,12 +7424,13 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
struct sdebug_defer *sd_dp;
sqp = sdebug_q_arr + queue_num;
- qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
- if (qc_idx >= sdebug_max_queue)
- return 0;
spin_lock_irqsave(&sqp->qc_lock, iflags);
+ qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+ if (qc_idx >= sdebug_max_queue)
+ goto unlock;
+
for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
if (first) {
first = false;
@@ -7589,6 +7495,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
break;
}
+unlock:
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (num_entries > 0)
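
[Most of the scsi_debug diff reverts the deflect-incoming machinery and returns sdebug_num_hosts to a plain int, but the sdebug_blk_mq_poll() hunk is a genuine ordering fix: the in-use bitmap was sampled before taking qc_lock, so a command completing on another CPU could invalidate qc_idx between the check and the locked section. The corrected ordering, annotated as a fragment:]

    spin_lock_irqsave(&sqp->qc_lock, iflags);

    /* sample the bitmap only while holding the lock that guards it */
    qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
    if (qc_idx >= sdebug_max_queue)
            goto unlock;                    /* queue is empty */

    /* ... walk and complete the in-flight entries ... */

    unlock:
    spin_unlock_irqrestore(&sqp->qc_lock, iflags);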
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index ff89de86545d..b02af340c2d3 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -30,7 +30,7 @@ static inline const char *scmd_name(const struct scsi_cmnd *scmd)
{
struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
- if (!rq->q->disk)
+ if (!rq->q || !rq->q->disk)
return NULL;
return rq->q->disk->disk_name;
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index f4e6c68ac99e..2ef78083f1ef 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -223,6 +223,8 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
int ret;
struct sbitmap sb_backup;
+ depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
+
/*
* realloc if new shift is calculated, which is caused by setting
* up one new default queue depth after calling ->slave_configure
@@ -245,6 +247,9 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
scsi_device_max_queue_depth(sdev),
new_shift, GFP_KERNEL,
sdev->request_queue->node, false, true);
+ if (!ret)
+ sbitmap_resize(&sdev->budget_map, depth);
+
if (need_free) {
if (ret)
sdev->budget_map = sb_backup;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 226a50944c00..dc6872e352bd 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1384,10 +1384,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) {
sdev->bsg_dev = scsi_bsg_register_queue(sdev);
if (IS_ERR(sdev->bsg_dev)) {
- /*
- * We're treating error on bsg register as non-fatal, so
- * pretend nothing went wrong.
- */
error = PTR_ERR(sdev->bsg_dev);
sdev_printk(KERN_INFO, sdev,
"Failed to register bsg queue, errno=%d\n",
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 27951ea05dd4..2c0dd64159b0 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -86,6 +86,9 @@ struct iscsi_internal {
struct transport_container session_cont;
};
+static DEFINE_IDR(iscsi_ep_idr);
+static DEFINE_MUTEX(iscsi_ep_idr_mutex);
+
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
static struct workqueue_struct *iscsi_conn_cleanup_workq;
@@ -168,6 +171,11 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
static void iscsi_endpoint_release(struct device *dev)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+
+ mutex_lock(&iscsi_ep_idr_mutex);
+ idr_remove(&iscsi_ep_idr, ep->id);
+ mutex_unlock(&iscsi_ep_idr_mutex);
+
kfree(ep);
}
@@ -180,7 +188,7 @@ static ssize_t
show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
+ return sysfs_emit(buf, "%d\n", ep->id);
}
static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
@@ -193,48 +201,32 @@ static struct attribute_group iscsi_endpoint_group = {
.attrs = iscsi_endpoint_attrs,
};
-#define ISCSI_MAX_EPID -1
-
-static int iscsi_match_epid(struct device *dev, const void *data)
-{
- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- const uint64_t *epid = data;
-
- return *epid == ep->id;
-}
-
struct iscsi_endpoint *
iscsi_create_endpoint(int dd_size)
{
- struct device *dev;
struct iscsi_endpoint *ep;
- uint64_t id;
- int err;
-
- for (id = 1; id < ISCSI_MAX_EPID; id++) {
- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
- iscsi_match_epid);
- if (!dev)
- break;
- else
- put_device(dev);
- }
- if (id == ISCSI_MAX_EPID) {
- printk(KERN_ERR "Too many connections. Max supported %u\n",
- ISCSI_MAX_EPID - 1);
- return NULL;
- }
+ int err, id;
ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
if (!ep)
return NULL;
+ mutex_lock(&iscsi_ep_idr_mutex);
+ id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
+ if (id < 0) {
+ mutex_unlock(&iscsi_ep_idr_mutex);
+ printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
+ id);
+ goto free_ep;
+ }
+ mutex_unlock(&iscsi_ep_idr_mutex);
+
ep->id = id;
ep->dev.class = &iscsi_endpoint_class;
- dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
+ dev_set_name(&ep->dev, "ep-%d", id);
err = device_register(&ep->dev);
if (err)
- goto free_ep;
+ goto free_id;
err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
if (err)
@@ -248,6 +240,10 @@ unregister_dev:
device_unregister(&ep->dev);
return NULL;
+free_id:
+ mutex_lock(&iscsi_ep_idr_mutex);
+ idr_remove(&iscsi_ep_idr, id);
+ mutex_unlock(&iscsi_ep_idr_mutex);
free_ep:
kfree(ep);
return NULL;
@@ -275,14 +271,17 @@ EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
*/
struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
{
- struct device *dev;
+ struct iscsi_endpoint *ep;
- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
- iscsi_match_epid);
- if (!dev)
- return NULL;
+ mutex_lock(&iscsi_ep_idr_mutex);
+ ep = idr_find(&iscsi_ep_idr, handle);
+ if (!ep)
+ goto unlock;
- return iscsi_dev_to_endpoint(dev);
+ get_device(&ep->dev);
+unlock:
+ mutex_unlock(&iscsi_ep_idr_mutex);
+ return ep;
}
EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
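
Taken together, the hunks above replace a linear class_find_device() scan with an IDR keyed by endpoint ID, and take the device reference inside the same mutex that protects the IDR, so a concurrent release cannot free the endpoint between lookup and get. A minimal kernel-style sketch of that pattern, assuming a struct foo with an embedded struct device (hypothetical names):

    static DEFINE_IDR(foo_idr);
    static DEFINE_MUTEX(foo_idr_mutex);

    static int foo_alloc_id(struct foo *f)
    {
        int id;

        mutex_lock(&foo_idr_mutex);
        id = idr_alloc(&foo_idr, f, 0, 0, GFP_KERNEL); /* end == 0: no upper bound */
        mutex_unlock(&foo_idr_mutex);
        return id;                  /* negative errno on failure */
    }

    static struct foo *foo_lookup(int id)
    {
        struct foo *f;

        mutex_lock(&foo_idr_mutex);
        f = idr_find(&foo_idr, id);
        if (f)
            get_device(&f->dev);    /* pin before the mutex drops */
        mutex_unlock(&foo_idr_mutex);
        return f;
    }

The matching idr_remove() belongs in the device release callback, as the iscsi_endpoint_release() hunk above does.
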
@@ -2202,10 +2201,10 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
switch (flag) {
case STOP_CONN_RECOVER:
- conn->state = ISCSI_CONN_FAILED;
+ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
break;
case STOP_CONN_TERM:
- conn->state = ISCSI_CONN_DOWN;
+ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
break;
default:
iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
@@ -2217,6 +2216,49 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
}
+static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
+{
+ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+ struct iscsi_endpoint *ep;
+
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
+ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
+
+ if (!conn->ep || !session->transport->ep_disconnect)
+ return;
+
+ ep = conn->ep;
+ conn->ep = NULL;
+
+ session->transport->unbind_conn(conn, is_active);
+ session->transport->ep_disconnect(ep);
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
+}
+
+static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
+ struct iscsi_endpoint *ep,
+ bool is_active)
+{
+ /* Check if this was a conn error and the kernel took ownership */
+ spin_lock_irq(&conn->lock);
+ if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
+ iscsi_ep_disconnect(conn, is_active);
+ } else {
+ spin_unlock_irq(&conn->lock);
+ ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
+ mutex_unlock(&conn->ep_mutex);
+
+ flush_work(&conn->cleanup_work);
+ /*
+ * Userspace is now done with the EP so we can release the ref
+ * iscsi_cleanup_conn_work_fn took.
+ */
+ iscsi_put_endpoint(ep);
+ mutex_lock(&conn->ep_mutex);
+ }
+}
+
static int iscsi_if_stop_conn(struct iscsi_transport *transport,
struct iscsi_uevent *ev)
{
@@ -2238,11 +2280,24 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
iscsi_stop_conn(conn, flag);
} else {
/*
+ * For offload, when iscsid is restarted it won't know about
+ * existing endpoints so it can't do an ep_disconnect. We clean
+ * it up here for userspace.
+ */
+ mutex_lock(&conn->ep_mutex);
+ if (conn->ep)
+ iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
+ mutex_unlock(&conn->ep_mutex);
+
+ /*
* Figure out if it was the kernel or userspace initiating this.
*/
+ spin_lock_irq(&conn->lock);
if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
iscsi_stop_conn(conn, flag);
} else {
+ spin_unlock_irq(&conn->lock);
ISCSI_DBG_TRANS_CONN(conn,
"flush kernel conn cleanup.\n");
flush_work(&conn->cleanup_work);
@@ -2251,31 +2306,14 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
* Only clear for recovery to avoid extra cleanup runs during
* termination.
*/
+ spin_lock_irq(&conn->lock);
clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+ spin_unlock_irq(&conn->lock);
}
ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
return 0;
}
-static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
-{
- struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
- struct iscsi_endpoint *ep;
-
- ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
- conn->state = ISCSI_CONN_FAILED;
-
- if (!conn->ep || !session->transport->ep_disconnect)
- return;
-
- ep = conn->ep;
- conn->ep = NULL;
-
- session->transport->unbind_conn(conn, is_active);
- session->transport->ep_disconnect(ep);
- ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
-}
-
static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
{
struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
@@ -2284,18 +2322,11 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
mutex_lock(&conn->ep_mutex);
/*
- * If we are not at least bound there is nothing for us to do. Userspace
- * will do a ep_disconnect call if offload is used, but will not be
- * doing a stop since there is nothing to clean up, so we have to clear
- * the cleanup bit here.
+ * Get a ref to the ep, so we don't release its ID until after
+ * userspace is done referencing it in iscsi_if_disconnect_bound_ep.
*/
- if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) {
- ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n");
- clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
- mutex_unlock(&conn->ep_mutex);
- return;
- }
-
+ if (conn->ep)
+ get_device(&conn->ep->dev);
iscsi_ep_disconnect(conn, false);
if (system_state != SYSTEM_RUNNING) {
@@ -2340,11 +2371,12 @@ iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
conn->dd_data = &conn[1];
mutex_init(&conn->ep_mutex);
+ spin_lock_init(&conn->lock);
INIT_LIST_HEAD(&conn->conn_list);
INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
conn->transport = transport;
conn->cid = cid;
- conn->state = ISCSI_CONN_DOWN;
+ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
/* this is released in the dev's release function */
if (!get_device(&session->dev))
@@ -2542,9 +2574,32 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
int len = nlmsg_total_size(sizeof(*ev));
+ unsigned long flags;
+ int state;
- if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
- queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
+ spin_lock_irqsave(&conn->lock, flags);
+ /*
+ * Userspace will only do a stop call if we are at least bound, and we
+ * only need to do the in-kernel cleanup if in the UP state so cmds can
+ * be released to upper layers. In other states, just wait for
+ * userspace to avoid races that can leave the cleanup_work queued.
+ */
+ state = READ_ONCE(conn->state);
+ switch (state) {
+ case ISCSI_CONN_BOUND:
+ case ISCSI_CONN_UP:
+ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP,
+ &conn->flags)) {
+ queue_work(iscsi_conn_cleanup_workq,
+ &conn->cleanup_work);
+ }
+ break;
+ default:
+ ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n",
+ state);
+ break;
+ }
+ spin_unlock_irqrestore(&conn->lock, flags);
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -2894,7 +2949,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
char *data = (char*)ev + sizeof(*ev);
struct iscsi_cls_conn *conn;
struct iscsi_cls_session *session;
- int err = 0, value = 0;
+ int err = 0, value = 0, state;
if (ev->u.set_param.len > PAGE_SIZE)
return -EINVAL;
@@ -2911,8 +2966,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session->recovery_tmo = value;
break;
default:
- if ((conn->state == ISCSI_CONN_BOUND) ||
- (conn->state == ISCSI_CONN_UP)) {
+ state = READ_ONCE(conn->state);
+ if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) {
err = transport->set_param(conn, ev->u.set_param.param,
data, ev->u.set_param.len);
} else {
@@ -2984,16 +3039,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
}
mutex_lock(&conn->ep_mutex);
- /* Check if this was a conn error and the kernel took ownership */
- if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
- ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
- mutex_unlock(&conn->ep_mutex);
-
- flush_work(&conn->cleanup_work);
- goto put_ep;
- }
-
- iscsi_ep_disconnect(conn, false);
+ iscsi_if_disconnect_bound_ep(conn, ep, false);
mutex_unlock(&conn->ep_mutex);
put_ep:
iscsi_put_endpoint(ep);
@@ -3696,24 +3742,17 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
return -EINVAL;
mutex_lock(&conn->ep_mutex);
+ spin_lock_irq(&conn->lock);
if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
mutex_unlock(&conn->ep_mutex);
ev->r.retcode = -ENOTCONN;
return 0;
}
+ spin_unlock_irq(&conn->lock);
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_BIND_CONN:
- if (conn->ep) {
- /*
- * For offload boot support where iscsid is restarted
- * during the pivot root stage, the ep will be intact
- * here when the new iscsid instance starts up and
- * reconnects.
- */
- iscsi_ep_disconnect(conn, true);
- }
-
session = iscsi_session_lookup(ev->u.b_conn.sid);
if (!session) {
err = -EINVAL;
@@ -3724,7 +3763,7 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
ev->u.b_conn.transport_eph,
ev->u.b_conn.is_leading);
if (!ev->r.retcode)
- conn->state = ISCSI_CONN_BOUND;
+ WRITE_ONCE(conn->state, ISCSI_CONN_BOUND);
if (ev->r.retcode || !transport->ep_connect)
break;
@@ -3743,7 +3782,8 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
case ISCSI_UEVENT_START_CONN:
ev->r.retcode = transport->start_conn(conn);
if (!ev->r.retcode)
- conn->state = ISCSI_CONN_UP;
+ WRITE_ONCE(conn->state, ISCSI_CONN_UP);
+
break;
case ISCSI_UEVENT_SEND_PDU:
pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
@@ -4050,10 +4090,11 @@ static ssize_t show_conn_state(struct device *dev,
{
struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
const char *state = "unknown";
+ int conn_state = READ_ONCE(conn->state);
- if (conn->state >= 0 &&
- conn->state < ARRAY_SIZE(connection_state_names))
- state = connection_state_names[conn->state];
+ if (conn_state >= 0 &&
+ conn_state < ARRAY_SIZE(connection_state_names))
+ state = connection_state_names[conn_state];
return sysfs_emit(buf, "%s\n", state);
}
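
The conn->state conversions in this file pair WRITE_ONCE() on every writer with READ_ONCE() on every lockless reader, so each access is a single non-torn load or store the compiler cannot cache or split. A minimal sketch of the idiom:

    /* writer: publish the new state with one store */
    WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);

    /* reader: snapshot once, then test only the local copy */
    state = READ_ONCE(conn->state);
    if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP)
        do_cleanup();
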
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a390679cf458..9694e2cfaf9a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -797,7 +797,6 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
case SD_LBP_FULL:
case SD_LBP_DISABLE:
blk_queue_max_discard_sectors(q, 0);
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
return;
case SD_LBP_UNMAP:
@@ -830,7 +829,6 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
}
blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}
static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
@@ -3216,6 +3214,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_block_limits(sdkp);
sd_read_block_characteristics(sdkp);
sd_zbc_read_zones(sdkp, buffer);
+ sd_read_cpr(sdkp);
}
sd_print_capacity(sdkp, old_capacity);
@@ -3225,7 +3224,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
- sd_read_cpr(sdkp);
}
/*
@@ -3475,6 +3473,7 @@ static int sd_probe(struct device *dev)
error = device_add_disk(dev, gd, NULL);
if (error) {
put_device(&sdkp->disk_dev);
+ blk_cleanup_disk(gd);
goto out;
}
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 5ba9df334968..cbd92891a762 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -535,7 +535,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
scsi_autopm_get_device(sdev);
- if (ret != CDROMCLOSETRAY && ret != CDROMEJECT) {
+ if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) {
ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
if (ret != -ENOSYS)
goto put;
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index ddd00efc4882..fbdb5124d7f7 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -41,7 +41,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
int result;
unsigned char *buffer;
- buffer = kmalloc(32, GFP_KERNEL);
+ buffer = kzalloc(32, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -55,10 +55,13 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
cgc.data_direction = DMA_FROM_DEVICE;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
tochdr->cdth_trk0 = buffer[2];
tochdr->cdth_trk1 = buffer[3];
+err:
kfree(buffer);
return result;
}
@@ -71,7 +74,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
int result;
unsigned char *buffer;
- buffer = kmalloc(32, GFP_KERNEL);
+ buffer = kzalloc(32, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -86,6 +89,8 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
cgc.data_direction = DMA_FROM_DEVICE;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
tocentry->cdte_ctrl = buffer[5] & 0xf;
tocentry->cdte_adr = buffer[5] >> 4;
@@ -98,6 +103,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
+ buffer[10]) << 8) + buffer[11];
+err:
kfree(buffer);
return result;
}
@@ -384,7 +390,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
{
Scsi_CD *cd = cdi->handle;
struct packet_command cgc;
- char *buffer = kmalloc(32, GFP_KERNEL);
+ char *buffer = kzalloc(32, GFP_KERNEL);
int result;
if (!buffer)
@@ -400,10 +406,13 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
cgc.data_direction = DMA_FROM_DEVICE;
cgc.timeout = IOCTL_TIMEOUT;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
memcpy(mcn->medium_catalog_number, buffer + 9, 13);
mcn->medium_catalog_number[13] = 0;
+err:
kfree(buffer);
return result;
}
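
All three hunks in sr_ioctl.c apply the same two-part fix: kzalloc() so the buffer starts zeroed, and an early goto so a failed sr_do_ioctl() result is never parsed, keeping stale or uninitialized kernel bytes away from the caller. A condensed sketch of the shape (names hypothetical):

    buffer = kzalloc(32, GFP_KERNEL);   /* zeroed allocation */
    if (!buffer)
        return -ENOMEM;

    result = do_command(buffer);
    if (result)
        goto err;                       /* don't parse a failed command's data */

    out->field = buffer[2];             /* reached only on success */
err:
    kfree(buffer);
    return result;
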
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 0d2e950d0865..586c0e567ff9 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -957,18 +957,6 @@ static const struct reset_control_ops ufs_qcom_reset_ops = {
.deassert = ufs_qcom_reset_deassert,
};
-#define ANDROID_BOOT_DEV_MAX 30
-static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
-
-#ifndef MODULE
-static int __init get_android_boot_dev(char *str)
-{
- strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
- return 1;
-}
-__setup("androidboot.bootdevice=", get_android_boot_dev);
-#endif
-
/**
* ufs_qcom_init - bind phy with controller
* @hba: host controller instance
@@ -988,9 +976,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
struct resource *res;
struct ufs_clk_info *clki;
- if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
- return -ENODEV;
-
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
err = -ENOMEM;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index f76692053ca1..e892b9feffb1 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -428,6 +428,12 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
return ufs_intel_common_init(hba);
}
+static int ufs_intel_mtl_init(struct ufs_hba *hba)
+{
+ hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
+ return ufs_intel_common_init(hba);
+}
+
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_common_init,
@@ -465,6 +471,16 @@ static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
.device_reset = ufs_intel_device_reset,
};
+static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
+ .name = "intel-pci",
+ .init = ufs_intel_mtl_init,
+ .exit = ufs_intel_common_exit,
+ .hce_enable_notify = ufs_intel_hce_enable_notify,
+ .link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
+ .device_reset = ufs_intel_device_reset,
+};
+
#ifdef CONFIG_PM_SLEEP
static int ufshcd_pci_restore(struct device *dev)
{
@@ -579,6 +595,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
{ PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ } /* terminate list */
};
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 88c20f3608c2..94f545be183a 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -820,8 +820,6 @@ struct ufs_hba {
enum ufs_pm_level rpm_lvl;
/* Desired UFS power management level during system PM */
enum ufs_pm_level spm_lvl;
- struct device_attribute rpm_lvl_attr;
- struct device_attribute spm_lvl_attr;
int pm_op_in_progress;
/* Auto-Hibernate Idle Timer register value */
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index b2bec19022cd..588c0329b80c 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -867,12 +867,6 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
struct ufshpb_region *rgn, *victim_rgn = NULL;
list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
- if (!rgn) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: no region allocated\n",
- __func__);
- return NULL;
- }
if (ufshpb_check_srgns_issue_state(hpb, rgn))
continue;
@@ -888,6 +882,11 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
break;
}
+ if (!victim_rgn)
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: no region allocated\n",
+ __func__);
+
return victim_rgn;
}
@@ -1255,6 +1254,13 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
int data_seg_len;
+ data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
+ & MASK_RSP_UPIU_DATA_SEG_LEN;
+
+ /* If data segment length is zero, rsp_field is not valid */
+ if (!data_seg_len)
+ return;
+
if (unlikely(lrbp->lun != rsp_field->lun)) {
struct scsi_device *sdev;
bool found = false;
@@ -1289,18 +1295,6 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return;
}
- data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
- & MASK_RSP_UPIU_DATA_SEG_LEN;
-
- /* To flush remained rsp_list, we queue the map_work task */
- if (!data_seg_len) {
- if (!ufshpb_is_general_lun(hpb->lun))
- return;
-
- ufshpb_kick_map_work(hpb);
- return;
- }
-
BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 0e6110da69e7..578c4b6d0f7d 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -988,7 +988,7 @@ static struct virtio_driver virtio_scsi_driver = {
.remove = virtscsi_remove,
};
-static int __init init(void)
+static int __init virtio_scsi_init(void)
{
int ret = -ENOMEM;
@@ -1020,14 +1020,14 @@ error:
return ret;
}
-static void __exit fini(void)
+static void __exit virtio_scsi_fini(void)
{
unregister_virtio_driver(&virtio_scsi_driver);
mempool_destroy(virtscsi_cmd_pool);
kmem_cache_destroy(virtscsi_cmd_cache);
}
-module_init(init);
-module_exit(fini);
+module_init(virtio_scsi_init);
+module_exit(virtio_scsi_fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index 27b9e2baab1a..7acf9193a9e8 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -159,6 +159,8 @@ static void zorro7xx_remove_one(struct zorro_dev *z)
scsi_remove_host(host);
NCR_700_release(host);
+ if (host->base > 0x01000000)
+ iounmap(hostdata->base);
kfree(hostdata);
free_irq(host->irq, host);
zorro_release_device(z);
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
index f04b961b96cd..ec58091fc948 100644
--- a/drivers/slimbus/qcom-ctrl.c
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -510,9 +510,9 @@ static int qcom_slim_probe(struct platform_device *pdev)
}
ctrl->irq = platform_get_irq(pdev, 0);
- if (!ctrl->irq) {
+ if (ctrl->irq < 0) {
dev_err(&pdev->dev, "no slimbus IRQ\n");
- return -ENODEV;
+ return ctrl->irq;
}
sctrl = &ctrl->ctrl;
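
The slimbus fix is the canonical platform_get_irq() pattern: the function returns a negative errno (including -EPROBE_DEFER) on failure and treats 0 as invalid on current kernels, so testing `!ctrl->irq` could not catch errors. A minimal sketch:

    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
        return irq;     /* propagate the errno, e.g. -EPROBE_DEFER */
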
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index 122f9c884b38..ccd0577a771e 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -50,7 +50,7 @@ struct imx8m_blk_ctrl_domain_data {
u32 mipi_phy_rst_mask;
};
-#define DOMAIN_MAX_CLKS 3
+#define DOMAIN_MAX_CLKS 4
struct imx8m_blk_ctrl_domain {
struct generic_pm_domain genpd;
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 92d9610df1fd..938017a60c8e 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -277,6 +277,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
static bool atmel_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
if (atmel_qspi_find_mode(op) < 0)
return false;
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 86c76211b3d3..cad2d55dcd3d 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1205,7 +1205,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
addr = op->addr.val;
len = op->data.nbytes;
- if (bcm_qspi_bspi_ver_three(qspi) == true) {
+ if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) {
/*
* The address coming into this function is a raw flash offset.
* But for BSPI <= V3, we need to convert it to a remapped BSPI
@@ -1224,7 +1224,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
len < 4)
mspi_read = true;
- if (mspi_read)
+ if (!has_bspi(qspi) || mspi_read)
return bcm_qspi_mspi_exec_mem_op(spi, op);
ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index b0c9f62ccefb..19686fb47bb3 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -19,6 +19,7 @@
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
+#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
@@ -102,12 +103,6 @@ struct cqspi_driver_platdata {
#define CQSPI_TIMEOUT_MS 500
#define CQSPI_READ_TIMEOUT_MS 10
-/* Instruction type */
-#define CQSPI_INST_TYPE_SINGLE 0
-#define CQSPI_INST_TYPE_DUAL 1
-#define CQSPI_INST_TYPE_QUAD 2
-#define CQSPI_INST_TYPE_OCTAL 3
-
#define CQSPI_DUMMY_CLKS_PER_BYTE 8
#define CQSPI_DUMMY_BYTES_MAX 4
#define CQSPI_DUMMY_CLKS_MAX 31
@@ -376,10 +371,6 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
- f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
- f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
- f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
-
/*
* For an op to be DTR, cmd phase along with every other non-empty
* phase should have dtr field set to 1. If an op phase has zero
@@ -389,32 +380,23 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
(!op->addr.nbytes || op->addr.dtr) &&
(!op->data.nbytes || op->data.dtr);
- switch (op->data.buswidth) {
- case 0:
- break;
- case 1:
- f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
- break;
- case 2:
- f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
- break;
- case 4:
- f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
- break;
- case 8:
- f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
- break;
- default:
- return -EINVAL;
- }
+ f_pdata->inst_width = 0;
+ if (op->cmd.buswidth)
+ f_pdata->inst_width = ilog2(op->cmd.buswidth);
+
+ f_pdata->addr_width = 0;
+ if (op->addr.buswidth)
+ f_pdata->addr_width = ilog2(op->addr.buswidth);
+
+ f_pdata->data_width = 0;
+ if (op->data.buswidth)
+ f_pdata->data_width = ilog2(op->data.buswidth);
/* Right now we only support 8-8-8 DTR mode. */
if (f_pdata->dtr) {
switch (op->cmd.buswidth) {
case 0:
- break;
case 8:
- f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL;
break;
default:
return -EINVAL;
@@ -422,9 +404,7 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
switch (op->addr.buswidth) {
case 0:
- break;
case 8:
- f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL;
break;
default:
return -EINVAL;
@@ -432,9 +412,7 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
switch (op->data.buswidth) {
case 0:
- break;
case 8:
- f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
break;
default:
return -EINVAL;
@@ -1437,9 +1415,24 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
!op->data.dtr;
- /* Mixed DTR modes not supported. */
- if (!(all_true || all_false))
+ if (all_true) {
+ /* Right now we only support 8-8-8 DTR mode. */
+ if (op->cmd.nbytes && op->cmd.buswidth != 8)
+ return false;
+ if (op->addr.nbytes && op->addr.buswidth != 8)
+ return false;
+ if (op->data.nbytes && op->data.buswidth != 8)
+ return false;
+ } else if (all_false) {
+ /* Only 1-1-X ops are supported without DTR */
+ if (op->cmd.nbytes && op->cmd.buswidth > 1)
+ return false;
+ if (op->addr.nbytes && op->addr.buswidth > 1)
+ return false;
+ } else {
+ /* Mixed DTR modes are not supported. */
return false;
+ }
return spi_mem_default_supports_op(mem, op);
}
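
The cqspi_set_protocol() rewrite works because the controller's width fields encode the exponent of a power-of-two bus width: 1, 2, 4 and 8 data lines become field values 0, 1, 2 and 3, which is exactly ilog2(). A small runnable check of that mapping (plain userspace C, not driver code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int widths[] = { 1, 2, 4, 8 };

        for (int i = 0; i < 4; i++)
            printf("buswidth %u -> field %d\n",
                   widths[i], __builtin_ctz(widths[i])); /* ctz == log2 for powers of 2 */
        return 0;       /* prints 0, 1, 2, 3 */
    }
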
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index a5ef7a526a7f..f6eec7a869b6 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -72,6 +72,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
index 94fb09696677..d167699a1a96 100644
--- a/drivers/spi/spi-mtk-nor.c
+++ b/drivers/spi/spi-mtk-nor.c
@@ -960,7 +960,17 @@ static int __maybe_unused mtk_nor_suspend(struct device *dev)
static int __maybe_unused mtk_nor_resume(struct device *dev)
{
- return pm_runtime_force_resume(dev);
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ mtk_nor_init(sp);
+
+ return 0;
}
static const struct dev_pm_ops mtk_nor_pm_ops = {
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
index 55c092069301..65be8e085ab8 100644
--- a/drivers/spi/spi-mxic.c
+++ b/drivers/spi/spi-mxic.c
@@ -813,6 +813,7 @@ static int mxic_spi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "spi_register_master failed\n");
pm_runtime_disable(&pdev->dev);
+ mxic_spi_mem_ecc_remove(mxic);
}
return ret;
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
index fe82f3575df4..24ec1c83f379 100644
--- a/drivers/spi/spi-rpc-if.c
+++ b/drivers/spi/spi-rpc-if.c
@@ -158,14 +158,18 @@ static int rpcif_spi_probe(struct platform_device *pdev)
error = rpcif_hw_init(rpc, false);
if (error)
- return error;
+ goto out_disable_rpm;
error = spi_register_controller(ctlr);
if (error) {
dev_err(&pdev->dev, "spi_register_controller failed\n");
- rpcif_disable_rpm(rpc);
+ goto out_disable_rpm;
}
+ return 0;
+
+out_disable_rpm:
+ rpcif_disable_rpm(rpc);
return error;
}
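
The rpc-if rework funnels both failure paths through one out_disable_rpm label, the usual kernel unwinding style: each step that can fail jumps to the label that undoes everything initialized before it. A minimal sketch of the shape:

    error = step_one();         /* e.g. rpcif_hw_init() */
    if (error)
        goto out_undo;

    error = step_two();         /* e.g. spi_register_controller() */
    if (error)
        goto out_undo;          /* same unwind as step_one's failure */

    return 0;

out_undo:
    undo_step_zero();           /* e.g. rpcif_disable_rpm() */
    return error;
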
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c4dd1200fe99..2e6d6bbeb784 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1130,11 +1130,15 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
if (ctlr->dma_tx)
tx_dev = ctlr->dma_tx->device->dev;
+ else if (ctlr->dma_map_dev)
+ tx_dev = ctlr->dma_map_dev;
else
tx_dev = ctlr->dev.parent;
if (ctlr->dma_rx)
rx_dev = ctlr->dma_rx->device->dev;
+ else if (ctlr->dma_map_dev)
+ rx_dev = ctlr->dma_map_dev;
else
rx_dev = ctlr->dev.parent;
@@ -2406,7 +2410,8 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
} else {
struct acpi_device *adev;
- if (acpi_bus_get_device(parent_handle, &adev))
+ adev = acpi_fetch_acpi_dev(parent_handle);
+ if (!adev)
return -ENODEV;
ctlr = acpi_spi_find_controller_by_adev(adev);
diff --git a/drivers/staging/r8188eu/core/rtw_br_ext.c b/drivers/staging/r8188eu/core/rtw_br_ext.c
index d68611ef22f8..f056204c0fdb 100644
--- a/drivers/staging/r8188eu/core/rtw_br_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_br_ext.c
@@ -70,7 +70,7 @@ static int __nat25_add_pppoe_tag(struct sk_buff *skb, struct pppoe_tag *tag)
struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
int data_len;
- data_len = tag->tag_len + TAG_HDR_LEN;
+ data_len = be16_to_cpu(tag->tag_len) + TAG_HDR_LEN;
if (skb_tailroom(skb) < data_len)
return -1;
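
The r8188eu fix is a classic endianness slip: pppoe_tag's tag_len is a big-endian on-wire field, so using it raw on a little-endian CPU produces a byte-swapped (and potentially huge) length. A runnable userspace illustration, using ntohs() in place of the kernel's be16_to_cpu():

    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
        unsigned short wire_len = htons(10);    /* field as stored on the wire */

        printf("raw       : %u\n", wire_len);        /* 2560 on little-endian */
        printf("converted : %u\n", ntohs(wire_len)); /* 10 on any host */
        return 0;
    }
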
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 6fe6a6bab3f4..ddf6c2a7212b 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3596,10 +3596,7 @@ static int iscsit_send_reject(
void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
{
int ord, cpu;
- cpumask_t conn_allowed_cpumask;
-
- cpumask_and(&conn_allowed_cpumask, iscsit_global->allowed_cpumask,
- cpu_online_mask);
+ cpumask_var_t conn_allowed_cpumask;
/*
* bitmap_id is assigned from iscsit_global->ts_bitmap from
@@ -3609,13 +3606,28 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
* iSCSI connection's RX/TX threads will be scheduled to
* execute upon.
*/
- cpumask_clear(conn->conn_cpumask);
- ord = conn->bitmap_id % cpumask_weight(&conn_allowed_cpumask);
- for_each_cpu(cpu, &conn_allowed_cpumask) {
- if (ord-- == 0) {
- cpumask_set_cpu(cpu, conn->conn_cpumask);
- return;
+ if (!zalloc_cpumask_var(&conn_allowed_cpumask, GFP_KERNEL)) {
+ ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
+ for_each_online_cpu(cpu) {
+ if (ord-- == 0) {
+ cpumask_set_cpu(cpu, conn->conn_cpumask);
+ return;
+ }
+ }
+ } else {
+ cpumask_and(conn_allowed_cpumask, iscsit_global->allowed_cpumask,
+ cpu_online_mask);
+
+ cpumask_clear(conn->conn_cpumask);
+ ord = conn->bitmap_id % cpumask_weight(conn_allowed_cpumask);
+ for_each_cpu(cpu, conn_allowed_cpumask) {
+ if (ord-- == 0) {
+ cpumask_set_cpu(cpu, conn->conn_cpumask);
+ free_cpumask_var(conn_allowed_cpumask);
+ return;
+ }
}
+ free_cpumask_var(conn_allowed_cpumask);
}
/*
* This should never be reached..
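
The cpumask conversion above exists because a stack cpumask_t is NR_CPUS bits wide, which can be kilobytes on large configs; zalloc_cpumask_var() moves it to the heap, with a degraded fallback when the allocation fails. A minimal kernel-style sketch (pick_cpu() is hypothetical):

    cpumask_var_t mask;

    if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
        pick_cpu(cpu_online_mask);      /* fallback: ignore the allowed mask */
        return;
    }

    cpumask_and(mask, allowed_mask, cpu_online_mask);
    pick_cpu(mask);
    free_cpumask_var(mask);             /* no-op unless CONFIG_CPUMASK_OFFSTACK */
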
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0cedcfe207b5..57b4fd56d92a 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1137,23 +1137,27 @@ static ssize_t lio_target_wwn_cpus_allowed_list_show(
static ssize_t lio_target_wwn_cpus_allowed_list_store(
struct config_item *item, const char *page, size_t count)
{
- int ret;
+ int ret = -ENOMEM;
char *orig;
- cpumask_t new_allowed_cpumask;
+ cpumask_var_t new_allowed_cpumask;
+
+ if (!zalloc_cpumask_var(&new_allowed_cpumask, GFP_KERNEL))
+ goto out;
orig = kstrdup(page, GFP_KERNEL);
if (!orig)
- return -ENOMEM;
+ goto out_free_cpumask;
- cpumask_clear(&new_allowed_cpumask);
- ret = cpulist_parse(orig, &new_allowed_cpumask);
+ ret = cpulist_parse(orig, new_allowed_cpumask);
+ if (!ret)
+ cpumask_copy(iscsit_global->allowed_cpumask,
+ new_allowed_cpumask);
kfree(orig);
- if (ret != 0)
- return ret;
-
- cpumask_copy(iscsit_global->allowed_cpumask, &new_allowed_cpumask);
- return count;
+out_free_cpumask:
+ free_cpumask_var(new_allowed_cpumask);
+out:
+ return ret ? ret : count;
}
CONFIGFS_ATTR(lio_target_wwn_, cpus_allowed_list);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 44bb380e7390..25f33eb25337 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -829,28 +829,26 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
}
/*
- * Check if the underlying struct block_device request_queue supports
- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
- * in ATA and we need to set TPE=1
+ * Check if the underlying struct block_device supports discard and if yes
+ * configure the UNMAP parameters.
*/
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct request_queue *q)
+ struct block_device *bdev)
{
- int block_size = queue_logical_block_size(q);
+ int block_size = bdev_logical_block_size(bdev);
- if (!blk_queue_discard(q))
+ if (!bdev_max_discard_sectors(bdev))
return false;
attrib->max_unmap_lba_count =
- q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
+ bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
attrib->max_unmap_block_desc_count = 1;
- attrib->unmap_granularity = q->limits.discard_granularity / block_size;
- attrib->unmap_granularity_alignment = q->limits.discard_alignment /
- block_size;
- attrib->unmap_zeroes_data = !!(q->limits.max_write_zeroes_sectors);
+ attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
+ attrib->unmap_granularity_alignment =
+ bdev_discard_alignment(bdev) / block_size;
return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
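
The unit conversion in the rewritten helper is worth spelling out: bdev_max_discard_sectors() counts 512-byte sectors, so shifting right by (ilog2(block_size) - 9) converts sectors to logical blocks. A runnable arithmetic check with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int block_size  = 4096;    /* bdev_logical_block_size() */
        unsigned int max_sectors = 65536;   /* bdev_max_discard_sectors(), 512B units */

        /* 65536 >> (12 - 9) = 8192 logical blocks */
        printf("max_unmap_lba_count = %u\n",
               max_sectors >> (__builtin_ctz(block_size) - 9));
        return 0;
    }
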
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 8190b840065f..e68f1cc8ef98 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -134,10 +134,10 @@ static int fd_configure_device(struct se_device *dev)
*/
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
- struct request_queue *q = bdev_get_queue(I_BDEV(inode));
+ struct block_device *bdev = I_BDEV(inode);
unsigned long long dev_size;
- fd_dev->fd_block_size = bdev_logical_block_size(I_BDEV(inode));
+ fd_dev->fd_block_size = bdev_logical_block_size(bdev);
/*
* Determine the number of bytes from i_size_read() minus
* one (1) logical sector from underlying struct block_device
@@ -150,7 +150,7 @@ static int fd_configure_device(struct se_device *dev)
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
- if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, bdev))
pr_debug("IFILE: BLOCK Discard support available,"
" disabled by default\n");
/*
@@ -159,7 +159,7 @@ static int fd_configure_device(struct se_device *dev)
*/
dev->dev_attrib.max_write_same_len = 0xFFFF;
- if (blk_queue_nonrot(q))
+ if (bdev_nonrot(bdev))
dev->dev_attrib.is_nonrot = 1;
} else {
if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
@@ -558,7 +558,7 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
ret = blkdev_issue_discard(bdev,
target_to_linux_sector(dev, lba),
target_to_linux_sector(dev, nolb),
- GFP_KERNEL, 0);
+ GFP_KERNEL);
if (ret < 0) {
pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
ret);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 87ede165ddba..378c80313a0f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -119,7 +119,7 @@ static int iblock_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
- if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, bd))
pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
@@ -133,7 +133,7 @@ static int iblock_configure_device(struct se_device *dev)
else
dev->dev_attrib.max_write_same_len = 0xFFFF;
- if (blk_queue_nonrot(q))
+ if (bdev_nonrot(bd))
dev->dev_attrib.is_nonrot = 1;
bi = bdev_get_integrity(bd);
@@ -434,7 +434,7 @@ iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
ret = blkdev_issue_discard(bdev,
target_to_linux_sector(dev, lba),
target_to_linux_sector(dev, nolb),
- GFP_KERNEL, 0);
+ GFP_KERNEL);
if (ret < 0) {
pr_err("blkdev_issue_discard() failed: %d\n", ret);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -727,17 +727,16 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (data_direction == DMA_TO_DEVICE) {
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
/*
* Force writethrough using REQ_FUA if a volatile write cache
* is not enabled, or if initiator set the Force Unit Access bit.
*/
opf = REQ_OP_WRITE;
miter_dir = SG_MITER_TO_SG;
- if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
+ if (bdev_fua(ib_dev->ibd_bd)) {
if (cmd->se_cmd_flags & SCF_FUA)
opf |= REQ_FUA;
- else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+ else if (!bdev_write_cache(ib_dev->ibd_bd))
opf |= REQ_FUA;
}
} else {
@@ -886,11 +885,7 @@ iblock_parse_cdb(struct se_cmd *cmd)
static bool iblock_get_write_cache(struct se_device *dev)
{
- struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- struct block_device *bd = ib_dev->ibd_bd;
- struct request_queue *q = bdev_get_queue(bd);
-
- return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
+ return bdev_write_cache(IBLOCK_DEV(dev)->ibd_bd);
}
static const struct target_backend_ops iblock_ops = {
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ff292b75e23f..bb3fb18b2316 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -588,7 +588,7 @@ static void pscsi_destroy_device(struct se_device *dev)
}
static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
- unsigned char *req_sense)
+ unsigned char *req_sense, int valid_data)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct scsi_device *sd = pdv->pdv_sd;
@@ -681,7 +681,7 @@ after_mode_select:
* back despite framework assumption that a
* check condition means there is no data
*/
- if (sd->type == TYPE_TAPE &&
+ if (sd->type == TYPE_TAPE && valid_data &&
cmd->data_direction == DMA_FROM_DEVICE) {
/*
* is sense data valid, fixed format,
@@ -818,24 +818,8 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
static void pscsi_bi_endio(struct bio *bio)
{
- bio_put(bio);
-}
-
-static inline struct bio *pscsi_get_bio(int nr_vecs)
-{
- struct bio *bio;
- /*
- * Use bio_malloc() following the comment in for bio -> struct request
- * in block/blk-core.c:blk_make_request()
- */
- bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
- if (!bio) {
- pr_err("PSCSI: bio_kmalloc() failed\n");
- return NULL;
- }
- bio->bi_end_io = pscsi_bi_endio;
-
- return bio;
+ bio_uninit(bio);
+ kfree(bio);
}
static sense_reason_t
@@ -878,15 +862,12 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (!bio) {
new_bio:
nr_vecs = bio_max_segs(nr_pages);
- /*
- * Calls bio_kmalloc() and sets bio->bi_end_io()
- */
- bio = pscsi_get_bio(nr_vecs);
+ bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
if (!bio)
goto fail;
-
- if (rw)
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs,
+ rw ? REQ_OP_WRITE : REQ_OP_READ);
+ bio->bi_end_io = pscsi_bi_endio;
pr_debug("PSCSI: Allocated bio: %p,"
" dir: %s nr_vecs: %d\n", bio,
@@ -912,11 +893,6 @@ new_bio:
goto fail;
}
- /*
- * Clear the pointer so that another bio will
- * be allocated with pscsi_get_bio() above.
- */
- bio = NULL;
goto new_bio;
}
@@ -1032,6 +1008,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
struct se_cmd *cmd = req->end_io_data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
enum sam_status scsi_status = scmd->result & 0xff;
+ int valid_data = cmd->data_length - scmd->resid_len;
u8 *cdb = cmd->priv;
if (scsi_status != SAM_STAT_GOOD) {
@@ -1039,12 +1016,11 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
" 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
}
- pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer);
+ pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer, valid_data);
switch (host_byte(scmd->result)) {
case DID_OK:
- target_complete_cmd_with_length(cmd, scsi_status,
- cmd->data_length - scmd->resid_len);
+ target_complete_cmd_with_length(cmd, scsi_status, valid_data);
break;
default:
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 95d4ca50a605..fd7267baa707 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1821,6 +1821,7 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
mutex_lock(&udev->cmdr_lock);
page = xa_load(&udev->data_pages, dpi);
if (likely(page)) {
+ get_page(page);
mutex_unlock(&udev->cmdr_lock);
return page;
}
@@ -1877,6 +1878,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
/* For the vmalloc()ed cmd area pages */
addr = (void *)(unsigned long)info->mem[mi].addr + offset;
page = vmalloc_to_page(addr);
+ get_page(page);
} else {
uint32_t dpi;
@@ -1887,7 +1889,6 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
- get_page(page);
vmf->page = page;
return 0;
}
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index a5eb4ef46971..c9b3b2cfb2b2 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -865,6 +865,7 @@ err_rhashtable_free:
rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
+ mutex_destroy(&optee->ffa.mutex);
err_unreg_supp_teedev:
tee_device_unregister(optee->supp_teedev);
err_unreg_teedev:
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index e37691e0bf20..0e5cc948373c 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -113,8 +113,10 @@ config THERMAL_DEFAULT_GOV_USER_SPACE
bool "user_space"
select THERMAL_GOV_USER_SPACE
help
- Select this if you want to let the user space manage the
- platform thermals.
+	  The Userspace governor allows user space to receive trip point
+	  crossed notifications from the kernel via uevents. It is
+	  recommended to use the netlink interface instead, which gives
+	  richer information about thermal framework events.
config THERMAL_DEFAULT_GOV_POWER_ALLOCATOR
bool "power_allocator"
diff --git a/drivers/thermal/gov_user_space.c b/drivers/thermal/gov_user_space.c
index 64a18e354a20..a62a4e90bd3f 100644
--- a/drivers/thermal/gov_user_space.c
+++ b/drivers/thermal/gov_user_space.c
@@ -17,8 +17,7 @@
static int user_space_bind(struct thermal_zone_device *tz)
{
- pr_warn_once("Userspace governor deprecated: use thermal netlink " \
- "notification instead\n");
+	pr_info_once("Consider using the thermal netlink events interface\n");
return 0;
}
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 4954800b9850..79931ddc582a 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -68,7 +68,7 @@ static int evaluate_odvp(struct int3400_thermal_priv *priv);
struct odvp_attr {
int odvp;
struct int3400_thermal_priv *priv;
- struct kobj_attribute attr;
+ struct device_attribute attr;
};
static ssize_t data_vault_read(struct file *file, struct kobject *kobj,
@@ -194,12 +194,31 @@ static int int3400_thermal_run_osc(acpi_handle handle, char *uuid_str, int *enab
return result;
}
+static int set_os_uuid_mask(struct int3400_thermal_priv *priv, u32 mask)
+{
+ int cap = 0;
+
+ /*
+ * Capability bits:
+ * Bit 0: set to 1 to indicate DPTF is active
+	 * Bit 1: set to 1 if active cooling is supported by the user space daemon
+	 * Bit 2: set to 1 if passive cooling is supported by the user space daemon
+	 * Bit 3: set to 1 if critical trips are handled by the user space daemon
+ */
+ if (mask)
+ cap = (priv->os_uuid_mask << 1) | 0x01;
+
+ return int3400_thermal_run_osc(priv->adev->handle,
+ "b23ba85d-c8b7-3542-88de-8de2ffcfd698",
+ &cap);
+}
+
static ssize_t current_uuid_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct int3400_thermal_priv *priv = dev_get_drvdata(dev);
- int i;
+ int ret, i;
for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; ++i) {
if (!strncmp(buf, int3400_thermal_uuids[i],
@@ -231,19 +250,7 @@ static ssize_t current_uuid_store(struct device *dev,
}
if (priv->os_uuid_mask) {
- int cap, ret;
-
- /*
- * Capability bits:
- * Bit 0: set to 1 to indicate DPTF is active
- * Bi1 1: set to 1 to active cooling is supported by user space daemon
- * Bit 2: set to 1 to passive cooling is supported by user space daemon
- * Bit 3: set to 1 to critical trip is handled by user space daemon
- */
- cap = ((priv->os_uuid_mask << 1) | 0x01);
- ret = int3400_thermal_run_osc(priv->adev->handle,
- "b23ba85d-c8b7-3542-88de-8de2ffcfd698",
- &cap);
+ ret = set_os_uuid_mask(priv, priv->os_uuid_mask);
if (ret)
return ret;
}
@@ -311,7 +318,7 @@ end:
return result;
}
-static ssize_t odvp_show(struct kobject *kobj, struct kobj_attribute *attr,
+static ssize_t odvp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct odvp_attr *odvp_attr;
@@ -469,17 +476,26 @@ static int int3400_thermal_change_mode(struct thermal_zone_device *thermal,
if (mode != thermal->mode) {
int enabled;
+ enabled = mode == THERMAL_DEVICE_ENABLED;
+
+ if (priv->os_uuid_mask) {
+ if (!enabled) {
+ priv->os_uuid_mask = 0;
+ result = set_os_uuid_mask(priv, priv->os_uuid_mask);
+ }
+ goto eval_odvp;
+ }
+
if (priv->current_uuid_index < 0 ||
priv->current_uuid_index >= INT3400_THERMAL_MAXIMUM_UUID)
return -EINVAL;
- enabled = (mode == THERMAL_DEVICE_ENABLED);
result = int3400_thermal_run_osc(priv->adev->handle,
int3400_thermal_uuids[priv->current_uuid_index],
&enabled);
}
-
+eval_odvp:
evaluate_odvp(priv);
return result;
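
The factored-out set_os_uuid_mask() builds the _OSC capability word as described in the (corrected) comment above: the UUID mask shifted left one bit, with bit 0 forced on to mark DPTF active. A tiny runnable check with a hypothetical mask value:

    #include <stdio.h>

    int main(void)
    {
        unsigned int os_uuid_mask = 0x5;        /* hypothetical user-space mask */
        unsigned int cap = 0;

        if (os_uuid_mask)
            cap = (os_uuid_mask << 1) | 0x01;   /* bit 0: DPTF active */

        printf("cap = 0x%x\n", cap);            /* prints 0xb */
        return 0;
    }
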
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index f154bada2906..1c4aac8464a7 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -610,9 +610,6 @@ cur_state_store(struct device *dev, struct device_attribute *attr,
unsigned long state;
int result;
- dev_warn_once(&cdev->device,
- "Setting cooling device state is deprecated\n");
-
if (sscanf(buf, "%ld\n", &state) != 1)
return -EINVAL;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index fa92f727fdf8..fd8b86dde525 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -73,6 +73,8 @@ module_param(debug, int, 0600);
*/
#define MAX_MRU 1500
#define MAX_MTU 1500
+/* SOF, ADDR, CTRL, LEN1, LEN2, ..., FCS, EOF */
+#define PROT_OVERHEAD 7
#define GSM_NET_TX_TIMEOUT (HZ*10)
/*
@@ -135,6 +137,7 @@ struct gsm_dlci {
int retries;
/* Uplink tty if active */
struct tty_port port; /* The tty bound to this DLCI if there is one */
+#define TX_SIZE 4096 /* Must be power of 2. */
struct kfifo fifo; /* Queue fifo for the DLCI */
int adaption; /* Adaption layer in use */
int prev_adaption;
@@ -219,7 +222,6 @@ struct gsm_mux {
int encoding;
u8 control;
u8 fcs;
- u8 received_fcs;
u8 *txframe; /* TX framing buffer */
/* Method for the receiver side */
@@ -231,6 +233,7 @@ struct gsm_mux {
int initiator; /* Did we initiate connection */
bool dead; /* Has the mux been shut down */
struct gsm_dlci *dlci[NUM_DLCI];
+ int old_c_iflag; /* termios c_iflag value before attach */
bool constipated; /* Asked by remote to shut up */
spinlock_t tx_lock;
@@ -271,10 +274,6 @@ static DEFINE_SPINLOCK(gsm_mux_lock);
static struct tty_driver *gsm_tty_driver;
-/* Save dlci open address */
-static int addr_open[256] = { 0 };
-/* Save dlci open count */
-static int addr_cnt;
/*
* This section of the driver logic implements the GSM encodings
* both the basic and the 'advanced'. Reliable transport is not
@@ -369,6 +368,7 @@ static const u8 gsm_fcs8[256] = {
#define GOOD_FCS 0xCF
static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len);
+static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk);
/**
* gsm_fcs_add - update FCS
@@ -832,7 +832,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
break;
case 2: /* Unstructured with modem bits.
Always one byte as we never send inline break data */
- *dp++ = gsm_encode_modem(dlci);
+ *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
break;
}
WARN_ON(kfifo_out_locked(&dlci->fifo, dp , len, &dlci->lock) != len);
@@ -917,6 +917,66 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
}
/**
+ * gsm_dlci_modem_output - try and push modem status out of a DLCI
+ * @gsm: mux
+ * @dlci: the DLCI to pull modem status from
+ * @brk: break signal
+ *
+ * Push an empty frame in to the transmit queue to update the modem status
+ * bits and to transmit an optional break.
+ *
+ * Caller must hold the tx_lock of the mux.
+ */
+
+static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
+ u8 brk)
+{
+ u8 *dp = NULL;
+ struct gsm_msg *msg;
+ int size = 0;
+
+ /* for modem bits without break data */
+ switch (dlci->adaption) {
+ case 1: /* Unstructured */
+ break;
+ case 2: /* Unstructured with modem bits. */
+ size++;
+ if (brk > 0)
+ size++;
+ break;
+ default:
+ pr_err("%s: unsupported adaption %d\n", __func__,
+ dlci->adaption);
+ return -EINVAL;
+ }
+
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+ if (!msg) {
+		pr_err("%s: gsm_data_alloc error\n", __func__);
+ return -ENOMEM;
+ }
+ dp = msg->data;
+ switch (dlci->adaption) {
+ case 1: /* Unstructured */
+ break;
+ case 2: /* Unstructured with modem bits. */
+ if (brk == 0) {
+ *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
+ } else {
+ *dp++ = gsm_encode_modem(dlci) << 1;
+ *dp++ = (brk << 4) | 2 | EA; /* Length, Break, EA */
+ }
+ break;
+ default:
+ /* Handled above */
+ break;
+ }
+
+ __gsm_data_queue(dlci, msg);
+ return size;
+}
+
+/**
* gsm_dlci_data_sweep - look for data to send
* @gsm: the GSM mux
*
@@ -1093,7 +1153,6 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
{
unsigned int addr = 0;
unsigned int modem = 0;
- unsigned int brk = 0;
struct gsm_dlci *dlci;
int len = clen;
int slen;
@@ -1123,17 +1182,8 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
return;
}
len--;
- if (len > 0) {
- while (gsm_read_ea(&brk, *dp++) == 0) {
- len--;
- if (len == 0)
- return;
- }
- modem <<= 7;
- modem |= (brk & 0x7f);
- }
tty = tty_port_tty_get(&dlci->port);
- gsm_process_modem(tty, dlci, modem, slen);
+ gsm_process_modem(tty, dlci, modem, slen - len);
if (tty) {
tty_wakeup(tty);
tty_kref_put(tty);
@@ -1193,7 +1243,6 @@ static void gsm_control_rls(struct gsm_mux *gsm, const u8 *data, int clen)
}
static void gsm_dlci_begin_close(struct gsm_dlci *dlci);
-static void gsm_dlci_close(struct gsm_dlci *dlci);
/**
* gsm_control_message - DLCI 0 control processing
@@ -1212,28 +1261,15 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
{
u8 buf[1];
unsigned long flags;
- struct gsm_dlci *dlci;
- int i;
- int address;
switch (command) {
case CMD_CLD: {
- if (addr_cnt > 0) {
- for (i = 0; i < addr_cnt; i++) {
- address = addr_open[i];
- dlci = gsm->dlci[address];
- gsm_dlci_close(dlci);
- addr_open[i] = 0;
- }
- }
+ struct gsm_dlci *dlci = gsm->dlci[0];
/* Modem wishes to close down */
- dlci = gsm->dlci[0];
if (dlci) {
dlci->dead = true;
gsm->dead = true;
- gsm_dlci_close(dlci);
- addr_cnt = 0;
- gsm_response(gsm, 0, UA|PF);
+ gsm_dlci_begin_close(dlci);
}
}
break;
@@ -1326,11 +1362,12 @@ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
{
- struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1, gsm->ftype);
+ struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 2, gsm->ftype);
if (msg == NULL)
return;
- msg->data[0] = (ctrl->cmd << 1) | 2 | EA; /* command */
- memcpy(msg->data + 1, ctrl->data, ctrl->len);
+ msg->data[0] = (ctrl->cmd << 1) | CR | EA; /* command */
+ msg->data[1] = (ctrl->len << 1) | EA;
+ memcpy(msg->data + 2, ctrl->data, ctrl->len);
gsm_data_queue(gsm->dlci[0], msg);
}
@@ -1353,7 +1390,6 @@ static void gsm_control_retransmit(struct timer_list *t)
spin_lock_irqsave(&gsm->control_lock, flags);
ctrl = gsm->pending_cmd;
if (ctrl) {
- gsm->cretries--;
if (gsm->cretries == 0) {
gsm->pending_cmd = NULL;
ctrl->error = -ETIMEDOUT;
@@ -1362,6 +1398,7 @@ static void gsm_control_retransmit(struct timer_list *t)
wake_up(&gsm->event);
return;
}
+ gsm->cretries--;
gsm_control_transmit(gsm, ctrl);
mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
}
@@ -1402,7 +1439,7 @@ retry:
/* If DLCI0 is in ADM mode skip retries, it won't respond */
if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
- gsm->cretries = 1;
+ gsm->cretries = 0;
else
gsm->cretries = gsm->n2;
@@ -1450,20 +1487,22 @@ static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
static void gsm_dlci_close(struct gsm_dlci *dlci)
{
+ unsigned long flags;
+
del_timer(&dlci->t1);
if (debug & 8)
pr_debug("DLCI %d goes closed.\n", dlci->addr);
dlci->state = DLCI_CLOSED;
if (dlci->addr != 0) {
tty_port_tty_hangup(&dlci->port, false);
+ spin_lock_irqsave(&dlci->lock, flags);
kfifo_reset(&dlci->fifo);
+ spin_unlock_irqrestore(&dlci->lock, flags);
/* Ensure that gsmtty_open() can return. */
tty_port_set_initialized(&dlci->port, 0);
wake_up_interruptible(&dlci->port.open_wait);
} else
dlci->gsm->dead = true;
- /* Unregister gsmtty driver,report gsmtty dev remove uevent for user */
- tty_unregister_device(gsm_tty_driver, dlci->addr);
wake_up(&dlci->gsm->event);
/* A DLCI 0 close is a MUX termination so we need to kick that
back to userspace somehow */
@@ -1485,8 +1524,9 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
dlci->state = DLCI_OPEN;
if (debug & 8)
pr_debug("DLCI %d goes open.\n", dlci->addr);
- /* Register gsmtty driver,report gsmtty dev add uevent for user */
- tty_register_device(gsm_tty_driver, dlci->addr, NULL);
+ /* Send current modem state */
+ if (dlci->addr)
+ gsm_modem_update(dlci, 0);
wake_up(&dlci->gsm->event);
}
@@ -1619,10 +1659,12 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
if (len == 0)
return;
}
+ len--;
slen++;
tty = tty_port_tty_get(port);
if (tty) {
gsm_process_modem(tty, dlci, modem, slen);
+ tty_wakeup(tty);
tty_kref_put(tty);
}
fallthrough;
@@ -1690,7 +1732,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
return NULL;
spin_lock_init(&dlci->lock);
mutex_init(&dlci->mutex);
- if (kfifo_alloc(&dlci->fifo, 4096, GFP_KERNEL) < 0) {
+ if (kfifo_alloc(&dlci->fifo, TX_SIZE, GFP_KERNEL) < 0) {
kfree(dlci);
return NULL;
}
@@ -1793,19 +1835,7 @@ static void gsm_queue(struct gsm_mux *gsm)
struct gsm_dlci *dlci;
u8 cr;
int address;
- int i, j, k, address_tmp;
- /* We have to sneak a look at the packet body to do the FCS.
- A somewhat layering violation in the spec */
- if ((gsm->control & ~PF) == UI)
- gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
- if (gsm->encoding == 0) {
- /* WARNING: gsm->received_fcs is used for
- gsm->encoding = 0 only.
- In this case it contain the last piece of data
- required to generate final CRC */
- gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
- }
if (gsm->fcs != GOOD_FCS) {
gsm->bad_fcs++;
if (debug & 4)
@@ -1836,11 +1866,6 @@ static void gsm_queue(struct gsm_mux *gsm)
else {
gsm_response(gsm, address, UA|PF);
gsm_dlci_open(dlci);
- /* Save dlci open address */
- if (address) {
- addr_open[addr_cnt] = address;
- addr_cnt++;
- }
}
break;
case DISC|PF:
@@ -1851,35 +1876,9 @@ static void gsm_queue(struct gsm_mux *gsm)
return;
}
/* Real close complete */
- if (!address) {
- if (addr_cnt > 0) {
- for (i = 0; i < addr_cnt; i++) {
- address = addr_open[i];
- dlci = gsm->dlci[address];
- gsm_dlci_close(dlci);
- addr_open[i] = 0;
- }
- }
- dlci = gsm->dlci[0];
- gsm_dlci_close(dlci);
- addr_cnt = 0;
- gsm_response(gsm, 0, UA|PF);
- } else {
- gsm_response(gsm, address, UA|PF);
- gsm_dlci_close(dlci);
- /* clear dlci address */
- for (j = 0; j < addr_cnt; j++) {
- address_tmp = addr_open[j];
- if (address_tmp == address) {
- for (k = j; k < addr_cnt; k++)
- addr_open[k] = addr_open[k+1];
- addr_cnt--;
- break;
- }
- }
- }
+ gsm_response(gsm, address, UA|PF);
+ gsm_dlci_close(dlci);
break;
- case UA:
case UA|PF:
if (cr == 0 || dlci == NULL)
break;
@@ -1993,19 +1992,25 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
break;
case GSM_DATA: /* Data */
gsm->buf[gsm->count++] = c;
- if (gsm->count == gsm->len)
+ if (gsm->count == gsm->len) {
+ /* Calculate final FCS for UI frames over all data */
+ if ((gsm->control & ~PF) != UIH) {
+ gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf,
+ gsm->count);
+ }
gsm->state = GSM_FCS;
+ }
break;
case GSM_FCS: /* FCS follows the packet */
- gsm->received_fcs = c;
- gsm_queue(gsm);
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
gsm->state = GSM_SSOF;
break;
case GSM_SSOF:
- if (c == GSM0_SOF) {
- gsm->state = GSM_SEARCH;
- break;
- }
+ gsm->state = GSM_SEARCH;
+ if (c == GSM0_SOF)
+ gsm_queue(gsm);
+ else
+ gsm->bad_size++;
break;
default:
pr_debug("%s: unhandled state: %d\n", __func__, gsm->state);
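
For reference, the FCS being accumulated in these hunks is the GSM 07.10 CRC-8: gsm_fcs_add()/gsm_fcs_add_block() are table-driven in the driver, and a frame checks good when folding the received FCS byte back into the running value leaves the residue GOOD_FCS (0xCF). A standalone bitwise sketch, assuming the 07.10 polynomial x^8 + x^2 + x + 1 (reflected form 0xE0):

#include <stdio.h>

#define INIT_FCS 0xFF
#define GOOD_FCS 0xCF

/* Bitwise equivalent of the driver's table lookup (sketch). */
static unsigned char fcs_add(unsigned char fcs, unsigned char c)
{
	int i;

	fcs ^= c;
	for (i = 0; i < 8; i++)
		fcs = (fcs & 1) ? (fcs >> 1) ^ 0xE0 : fcs >> 1;
	return fcs;
}

int main(void)
{
	/* hypothetical address/control/length octets of a frame */
	unsigned char hdr[] = { 0x07, 0xEF, 0x01 };
	unsigned char fcs = INIT_FCS;
	size_t i;

	for (i = 0; i < sizeof(hdr); i++)
		fcs = fcs_add(fcs, hdr[i]);
	/* the sender transmits the ones' complement of the running
	 * FCS; folding it back in must leave the 0xCF residue */
	printf("frame check: %s\n",
	       fcs_add(fcs, 0xFF - fcs) == GOOD_FCS ? "ok" : "bad");
	return 0;
}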
@@ -2023,12 +2028,35 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
{
+ /* handle XON/XOFF */
+ if ((c & ISO_IEC_646_MASK) == XON) {
+ gsm->constipated = true;
+ return;
+ } else if ((c & ISO_IEC_646_MASK) == XOFF) {
+ gsm->constipated = false;
+ /* Kick the link in case it is idling */
+ gsm_data_kick(gsm, NULL);
+ return;
+ }
if (c == GSM1_SOF) {
- /* EOF is only valid in frame if we have got to the data state
- and received at least one byte (the FCS) */
- if (gsm->state == GSM_DATA && gsm->count) {
- /* Extract the FCS */
+ /* EOF is only valid in a frame if we have reached the data state */
+ if (gsm->state == GSM_DATA) {
+ if (gsm->count < 1) {
+ /* Missing FCS */
+ gsm->malformed++;
+ gsm->state = GSM_START;
+ return;
+ }
+ /* Remove the FCS from data */
gsm->count--;
+ if ((gsm->control & ~PF) != UIH) {
+ /* Calculate final FCS for UI frames over all
+ * data but FCS
+ */
+ gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf,
+ gsm->count);
+ }
+ /* Add the FCS itself to test against GOOD_FCS */
gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
gsm->len = gsm->count;
gsm_queue(gsm);
@@ -2037,7 +2065,8 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
}
/* Any partial frame was a runt so go back to start */
if (gsm->state != GSM_START) {
- gsm->malformed++;
+ if (gsm->state != GSM_SEARCH)
+ gsm->malformed++;
gsm->state = GSM_START;
}
/* A SOF in GSM_START means we are still reading idling or
@@ -2106,74 +2135,43 @@ static void gsm_error(struct gsm_mux *gsm)
gsm->io_error++;
}
-static int gsm_disconnect(struct gsm_mux *gsm)
-{
- struct gsm_dlci *dlci = gsm->dlci[0];
- struct gsm_control *gc;
-
- if (!dlci)
- return 0;
-
- /* In theory disconnecting DLCI 0 is sufficient but for some
- modems this is apparently not the case. */
- gc = gsm_control_send(gsm, CMD_CLD, NULL, 0);
- if (gc)
- gsm_control_wait(gsm, gc);
-
- del_timer_sync(&gsm->t2_timer);
- /* Now we are sure T2 has stopped */
-
- gsm_dlci_begin_close(dlci);
- wait_event_interruptible(gsm->event,
- dlci->state == DLCI_CLOSED);
-
- if (signal_pending(current))
- return -EINTR;
-
- return 0;
-}
-
/**
* gsm_cleanup_mux - generic GSM protocol cleanup
* @gsm: our mux
+ * @disc: disconnect link?
*
* Clean up the bits of the mux which are the same for all framing
* protocols. Remove the mux from the mux table, stop all the timers
* and then shut down each device hanging up the channels as we go.
*/
-static void gsm_cleanup_mux(struct gsm_mux *gsm)
+static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
{
int i;
struct gsm_dlci *dlci = gsm->dlci[0];
struct gsm_msg *txq, *ntxq;
gsm->dead = true;
+ mutex_lock(&gsm->mutex);
- spin_lock(&gsm_mux_lock);
- for (i = 0; i < MAX_MUX; i++) {
- if (gsm_mux[i] == gsm) {
- gsm_mux[i] = NULL;
- break;
+ if (dlci) {
+ if (disc && dlci->state != DLCI_CLOSED) {
+ gsm_dlci_begin_close(dlci);
+ wait_event(gsm->event, dlci->state == DLCI_CLOSED);
}
+ dlci->dead = true;
}
- spin_unlock(&gsm_mux_lock);
- /* open failed before registering => nothing to do */
- if (i == MAX_MUX)
- return;
+ /* Finish outstanding timers, making sure they are done */
del_timer_sync(&gsm->t2_timer);
- /* Now we are sure T2 has stopped */
- if (dlci)
- dlci->dead = true;
- /* Free up any link layer users */
- mutex_lock(&gsm->mutex);
- for (i = 0; i < NUM_DLCI; i++)
+ /* Free up any link layer users and finally the control channel */
+ for (i = NUM_DLCI - 1; i >= 0; i--)
if (gsm->dlci[i])
gsm_dlci_release(gsm->dlci[i]);
mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
+ tty_ldisc_flush(gsm->tty);
list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list)
kfree(txq);
INIT_LIST_HEAD(&gsm->tx_list);
@@ -2191,7 +2189,6 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
static int gsm_activate_mux(struct gsm_mux *gsm)
{
struct gsm_dlci *dlci;
- int i = 0;
timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
init_waitqueue_head(&gsm->event);
@@ -2203,18 +2200,6 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
else
gsm->receive = gsm1_receive;
- spin_lock(&gsm_mux_lock);
- for (i = 0; i < MAX_MUX; i++) {
- if (gsm_mux[i] == NULL) {
- gsm->num = i;
- gsm_mux[i] = gsm;
- break;
- }
- }
- spin_unlock(&gsm_mux_lock);
- if (i == MAX_MUX)
- return -EBUSY;
-
dlci = gsm_dlci_alloc(gsm, 0);
if (dlci == NULL)
return -ENOMEM;
@@ -2230,6 +2215,15 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
*/
static void gsm_free_mux(struct gsm_mux *gsm)
{
+ int i;
+
+ for (i = 0; i < MAX_MUX; i++) {
+ if (gsm == gsm_mux[i]) {
+ gsm_mux[i] = NULL;
+ break;
+ }
+ }
+ mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
kfree(gsm);
@@ -2249,12 +2243,20 @@ static void gsm_free_muxr(struct kref *ref)
static inline void mux_get(struct gsm_mux *gsm)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsm_mux_lock, flags);
kref_get(&gsm->ref);
+ spin_unlock_irqrestore(&gsm_mux_lock, flags);
}
static inline void mux_put(struct gsm_mux *gsm)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsm_mux_lock, flags);
kref_put(&gsm->ref, gsm_free_muxr);
+ spin_unlock_irqrestore(&gsm_mux_lock, flags);
}
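
Wrapping the kref operations in gsm_mux_lock makes the refcount transitions atomic with respect to the gsm_mux[] table that the same lock protects: a lookup that finds an entry can pin it before the final put (which frees the mux and, after this patch, clears its slot in gsm_free_mux()) can run. A sketch of the lookup side this enables, using the driver's real names but hypothetical glue:

static struct gsm_mux *example_get_mux(unsigned int i)
{
	struct gsm_mux *gsm;
	unsigned long flags;

	spin_lock_irqsave(&gsm_mux_lock, flags);
	gsm = gsm_mux[i];
	if (gsm)
		kref_get(&gsm->ref);	/* cannot race gsm_free_muxr() */
	spin_unlock_irqrestore(&gsm_mux_lock, flags);
	return gsm;			/* caller does mux_put() later */
}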
static inline unsigned int mux_num_to_base(struct gsm_mux *gsm)
@@ -2275,6 +2277,7 @@ static inline unsigned int mux_line_to_num(unsigned int line)
static struct gsm_mux *gsm_alloc_mux(void)
{
+ int i;
struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL);
if (gsm == NULL)
return NULL;
@@ -2283,7 +2286,7 @@ static struct gsm_mux *gsm_alloc_mux(void)
kfree(gsm);
return NULL;
}
- gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL);
+ gsm->txframe = kmalloc(2 * (MAX_MTU + PROT_OVERHEAD - 1), GFP_KERNEL);
if (gsm->txframe == NULL) {
kfree(gsm->buf);
kfree(gsm);
@@ -2304,6 +2307,26 @@ static struct gsm_mux *gsm_alloc_mux(void)
gsm->mtu = 64;
gsm->dead = true; /* Avoid early tty opens */
+ /* Store the instance in the mux array or abort if no space is
+ * available.
+ */
+ spin_lock(&gsm_mux_lock);
+ for (i = 0; i < MAX_MUX; i++) {
+ if (!gsm_mux[i]) {
+ gsm_mux[i] = gsm;
+ gsm->num = i;
+ break;
+ }
+ }
+ spin_unlock(&gsm_mux_lock);
+ if (i == MAX_MUX) {
+ mutex_destroy(&gsm->mutex);
+ kfree(gsm->txframe);
+ kfree(gsm->buf);
+ kfree(gsm);
+ return NULL;
+ }
+
return gsm;
}
@@ -2330,6 +2353,7 @@ static void gsm_copy_config_values(struct gsm_mux *gsm,
static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
{
+ int ret = 0;
int need_close = 0;
int need_restart = 0;
@@ -2339,7 +2363,7 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
/* Check the MRU/MTU range looks sane */
if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
return -EINVAL;
- if (c->n2 < 3)
+ if (c->n2 > 255)
return -EINVAL;
if (c->encapsulation > 1) /* Basic, advanced, no I */
return -EINVAL;
@@ -2370,19 +2394,11 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
/*
* Close down what is needed, restart and initiate the new
- * configuration
+ * configuration. On the first pass there is no DLCI[0]
+ * and closing or cleaning up is not necessary.
*/
-
- if (gsm->initiator && (need_close || need_restart)) {
- int ret;
-
- ret = gsm_disconnect(gsm);
-
- if (ret)
- return ret;
- }
- if (need_restart)
- gsm_cleanup_mux(gsm);
+ if (need_close || need_restart)
+ gsm_cleanup_mux(gsm, true);
gsm->initiator = c->initiator;
gsm->mru = c->mru;
@@ -2405,10 +2421,13 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
* FIXME: We need to separate activation/deactivation from adding
* and removing from the mux array
*/
- if (need_restart)
- gsm_activate_mux(gsm);
- if (gsm->initiator && need_close)
- gsm_dlci_begin_open(gsm->dlci[0]);
+ if (gsm->dead) {
+ ret = gsm_activate_mux(gsm);
+ if (ret)
+ return ret;
+ if (gsm->initiator)
+ gsm_dlci_begin_open(gsm->dlci[0]);
+ }
return 0;
}
@@ -2450,25 +2469,26 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
int ret, i;
gsm->tty = tty_kref_get(tty);
+ /* Turn off tty XON/XOFF handling to handle it explicitly. */
+ gsm->old_c_iflag = tty->termios.c_iflag;
+ tty->termios.c_iflag &= (IXON | IXOFF);
ret = gsm_activate_mux(gsm);
if (ret != 0)
tty_kref_put(gsm->tty);
else {
/* Don't register device 0 - this is the control channel and not
a usable tty interface */
- if (gsm->initiator) {
- base = mux_num_to_base(gsm); /* Base for this MUX */
- for (i = 1; i < NUM_DLCI; i++) {
- struct device *dev;
+ base = mux_num_to_base(gsm); /* Base for this MUX */
+ for (i = 1; i < NUM_DLCI; i++) {
+ struct device *dev;
- dev = tty_register_device(gsm_tty_driver,
+ dev = tty_register_device(gsm_tty_driver,
base + i, NULL);
- if (IS_ERR(dev)) {
- for (i--; i >= 1; i--)
- tty_unregister_device(gsm_tty_driver,
- base + i);
- return PTR_ERR(dev);
- }
+ if (IS_ERR(dev)) {
+ for (i--; i >= 1; i--)
+ tty_unregister_device(gsm_tty_driver,
+ base + i);
+ return PTR_ERR(dev);
}
}
}
@@ -2490,11 +2510,10 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
int i;
WARN_ON(tty != gsm->tty);
- if (gsm->initiator) {
- for (i = 1; i < NUM_DLCI; i++)
- tty_unregister_device(gsm_tty_driver, base + i);
- }
- gsm_cleanup_mux(gsm);
+ for (i = 1; i < NUM_DLCI; i++)
+ tty_unregister_device(gsm_tty_driver, base + i);
+ /* Restore tty XON/XOFF handling. */
+ gsm->tty->termios.c_iflag = gsm->old_c_iflag;
tty_kref_put(gsm->tty);
gsm->tty = NULL;
}
@@ -2559,6 +2578,12 @@ static void gsmld_close(struct tty_struct *tty)
{
struct gsm_mux *gsm = tty->disc_data;
+ /* The ldisc locks and closes the port before calling our close. This
+ * means we have no way to do a proper disconnect. We will not bother
+ * to do one.
+ */
+ gsm_cleanup_mux(gsm, false);
+
gsmld_detach_gsm(tty, gsm);
gsmld_flush_buffer(tty);
@@ -2597,7 +2622,7 @@ static int gsmld_open(struct tty_struct *tty)
ret = gsmld_attach_gsm(tty, gsm);
if (ret != 0) {
- gsm_cleanup_mux(gsm);
+ gsm_cleanup_mux(gsm, false);
mux_put(gsm);
}
return ret;
@@ -2952,28 +2977,78 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
* Virtual tty side
*/
-#define TX_SIZE 512
+/**
+ * gsm_modem_upd_via_data - send modem bits via convergence layer
+ * @dlci: channel
+ * @brk: break signal
+ *
+ * Send an empty frame to signal mobile state changes and to transmit the
+ * break signal for adaption 2.
+ */
+
+static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk)
+{
+ struct gsm_mux *gsm = dlci->gsm;
+ unsigned long flags;
+
+ if (dlci->state != DLCI_OPEN || dlci->adaption != 2)
+ return;
+
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ gsm_dlci_modem_output(gsm, dlci, brk);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+}
+
+/**
+ * gsm_modem_upd_via_msc - send modem bits via control frame
+ * @dlci: channel
+ * @brk: break signal
+ */
-static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
+static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
{
- u8 modembits[5];
+ u8 modembits[3];
struct gsm_control *ctrl;
int len = 2;
- if (brk)
- len++;
+ if (dlci->gsm->encoding != 0)
+ return 0;
- modembits[0] = len << 1 | EA; /* Data bytes */
- modembits[1] = dlci->addr << 2 | 3; /* DLCI, EA, 1 */
- modembits[2] = gsm_encode_modem(dlci) << 1 | EA;
- if (brk)
- modembits[3] = brk << 4 | 2 | EA; /* Valid, EA */
- ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len + 1);
+ modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */
+ if (!brk) {
+ modembits[1] = (gsm_encode_modem(dlci) << 1) | EA;
+ } else {
+ modembits[1] = gsm_encode_modem(dlci) << 1;
+ modembits[2] = (brk << 4) | 2 | EA; /* Length, Break, EA */
+ len++;
+ }
+ ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len);
if (ctrl == NULL)
return -ENOMEM;
return gsm_control_wait(dlci->gsm, ctrl);
}
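
The value octets built above follow the 07.10 MSC layout: octet 0 carries the DLCI (shifted left twice) plus the C/R bit and EA, octet 1 carries the encoded V.24 signals with EA set only when nothing follows, and an optional octet 2 carries the break length and break flag. A distilled sketch of just the encoding (hypothetical encode_msc(), mirroring the code above):

static unsigned int encode_msc(unsigned char *msc, unsigned char addr,
			       unsigned char signals, unsigned char brk)
{
	unsigned int len = 2;

	msc[0] = (addr << 2) | 2 | 1;		/* DLCI, C/R, EA */
	if (!brk) {
		msc[1] = (signals << 1) | 1;	/* signals, EA ends field */
	} else {
		msc[1] = signals << 1;		/* EA clear: octet follows */
		msc[2] = (brk << 4) | 2 | 1;	/* break length, flag, EA */
		len = 3;
	}
	return len;				/* bytes to hand to CMD_MSC */
}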
+/**
+ * gsm_modem_update - send modem status line state
+ * @dlci: channel
+ * @brk: break signal
+ */
+
+static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk)
+{
+ if (dlci->adaption == 2) {
+ /* Send convergence layer type 2 empty data frame. */
+ gsm_modem_upd_via_data(dlci, brk);
+ return 0;
+ } else if (dlci->gsm->encoding == 0) {
+ /* Send as MSC control message. */
+ return gsm_modem_upd_via_msc(dlci, brk);
+ }
+
+ /* Modem status lines are not supported. */
+ return -EPROTONOSUPPORT;
+}
+
static int gsm_carrier_raised(struct tty_port *port)
{
struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
@@ -3006,7 +3081,7 @@ static void gsm_dtr_rts(struct tty_port *port, int onoff)
modem_tx &= ~(TIOCM_DTR | TIOCM_RTS);
if (modem_tx != dlci->modem_tx) {
dlci->modem_tx = modem_tx;
- gsmtty_modem_update(dlci, 0);
+ gsm_modem_update(dlci, 0);
}
}
@@ -3141,7 +3216,7 @@ static unsigned int gsmtty_write_room(struct tty_struct *tty)
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
return 0;
- return TX_SIZE - kfifo_len(&dlci->fifo);
+ return kfifo_avail(&dlci->fifo);
}
static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty)
@@ -3155,13 +3230,17 @@ static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty)
static void gsmtty_flush_buffer(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ unsigned long flags;
+
if (dlci->state == DLCI_CLOSED)
return;
/* Caution needed: If we implement reliable transport classes
then the data being transmitted can't simply be junked once
it has first hit the stack. Until then we can just blow it
away */
+ spin_lock_irqsave(&dlci->lock, flags);
kfifo_reset(&dlci->fifo);
+ spin_unlock_irqrestore(&dlci->lock, flags);
/* Need to unhook this DLCI from the transmit queue logic */
}
@@ -3193,7 +3272,7 @@ static int gsmtty_tiocmset(struct tty_struct *tty,
if (modem_tx != dlci->modem_tx) {
dlci->modem_tx = modem_tx;
- return gsmtty_modem_update(dlci, 0);
+ return gsm_modem_update(dlci, 0);
}
return 0;
}
@@ -3254,7 +3333,7 @@ static void gsmtty_throttle(struct tty_struct *tty)
dlci->modem_tx &= ~TIOCM_RTS;
dlci->throttled = true;
/* Send an MSC with RTS cleared */
- gsmtty_modem_update(dlci, 0);
+ gsm_modem_update(dlci, 0);
}
static void gsmtty_unthrottle(struct tty_struct *tty)
@@ -3266,7 +3345,7 @@ static void gsmtty_unthrottle(struct tty_struct *tty)
dlci->modem_tx |= TIOCM_RTS;
dlci->throttled = false;
/* Send an MSC with RTS set */
- gsmtty_modem_update(dlci, 0);
+ gsm_modem_update(dlci, 0);
}
static int gsmtty_break_ctl(struct tty_struct *tty, int state)
@@ -3284,7 +3363,7 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
if (encode > 0x0F)
encode = 0x0F; /* Best effort */
}
- return gsmtty_modem_update(dlci, encode);
+ return gsm_modem_update(dlci, encode);
}
static void gsmtty_cleanup(struct tty_struct *tty)
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index f4a0caa56f84..21053db93ff1 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -37,6 +37,7 @@
#define MTK_UART_IER_RTSI 0x40 /* Enable RTS Modem status interrupt */
#define MTK_UART_IER_CTSI 0x80 /* Enable CTS Modem status interrupt */
+#define MTK_UART_EFR 38 /* I/O: Extended Features Register */
#define MTK_UART_EFR_EN 0x10 /* Enable enhancement feature */
#define MTK_UART_EFR_RTS 0x40 /* Enable hardware rx flow control */
#define MTK_UART_EFR_CTS 0x80 /* Enable hardware tx flow control */
@@ -53,6 +54,12 @@
#define MTK_UART_TX_TRIGGER 1
#define MTK_UART_RX_TRIGGER MTK_UART_RX_SIZE
+#define MTK_UART_FEATURE_SEL 39 /* Feature Selection register */
+#define MTK_UART_FEAT_NEWRMAP BIT(0) /* Use new register map */
+
+#define MTK_UART_XON1 40 /* I/O: Xon character 1 */
+#define MTK_UART_XOFF1 42 /* I/O: Xoff character 1 */
+
#ifdef CONFIG_SERIAL_8250_DMA
enum dma_rx_status {
DMA_RX_START = 0,
@@ -169,7 +176,7 @@ static void mtk8250_dma_enable(struct uart_8250_port *up)
MTK_UART_DMA_EN_RX | MTK_UART_DMA_EN_TX);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, MTK_UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, lcr);
if (dmaengine_slave_config(dma->rxchan, &dma->rxconf) != 0)
@@ -232,7 +239,7 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
int lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, MTK_UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, lcr);
lcr = serial_in(up, UART_LCR);
@@ -241,7 +248,7 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
serial_out(up, MTK_UART_ESCAPE_DAT, MTK_UART_ESCAPE_CHAR);
serial_out(up, MTK_UART_ESCAPE_EN, 0x00);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_EFR, serial_in(up, UART_EFR) &
+ serial_out(up, MTK_UART_EFR, serial_in(up, MTK_UART_EFR) &
(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK)));
serial_out(up, UART_LCR, lcr);
mtk8250_disable_intrs(up, MTK_UART_IER_XOFFI |
@@ -255,8 +262,8 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
/*enable hw flow control*/
- serial_out(up, UART_EFR, MTK_UART_EFR_HW_FC |
- (serial_in(up, UART_EFR) &
+ serial_out(up, MTK_UART_EFR, MTK_UART_EFR_HW_FC |
+ (serial_in(up, MTK_UART_EFR) &
(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))));
serial_out(up, UART_LCR, lcr);
@@ -270,12 +277,12 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
/*enable sw flow control */
- serial_out(up, UART_EFR, MTK_UART_EFR_XON1_XOFF1 |
- (serial_in(up, UART_EFR) &
+ serial_out(up, MTK_UART_EFR, MTK_UART_EFR_XON1_XOFF1 |
+ (serial_in(up, MTK_UART_EFR) &
(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))));
- serial_out(up, UART_XON1, START_CHAR(port->state->port.tty));
- serial_out(up, UART_XOFF1, STOP_CHAR(port->state->port.tty));
+ serial_out(up, MTK_UART_XON1, START_CHAR(port->state->port.tty));
+ serial_out(up, MTK_UART_XOFF1, STOP_CHAR(port->state->port.tty));
serial_out(up, UART_LCR, lcr);
mtk8250_disable_intrs(up, MTK_UART_IER_CTSI|MTK_UART_IER_RTSI);
mtk8250_enable_intrs(up, MTK_UART_IER_XOFFI);
@@ -568,6 +575,10 @@ static int mtk8250_probe(struct platform_device *pdev)
uart.dma = data->dma;
#endif
+ /* Set AP UART new register map */
+ writel(MTK_UART_FEAT_NEWRMAP, uart.port.membase +
+ (MTK_UART_FEATURE_SEL << uart.port.regshift));
+
/* Disable Rate Fix function */
writel(0x0, uart.port.membase +
(MTK_UART_RATE_FIX << uart.port.regshift));
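
All of the MTK_UART_* indices above (EFR at 38, FEATURE_SEL at 39, XON1/XOFF1 at 40/42) sit past the standard 8250 register file and are reached through the same regshift scaling the core uses, so index N lives at byte offset N << regshift. A kernel-style sketch of the addressing assumed by the writel() above (hypothetical helper):

static void __iomem *mtk_reg(void __iomem *membase, unsigned int index,
			     unsigned int regshift)
{
	/* e.g. regshift = 2 places index 39 at byte offset 156 */
	return membase + (index << regshift);
}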
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e17e97ea86fa..a293e9f107d0 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -2667,7 +2667,7 @@ enum pci_board_num_t {
pbn_panacom2,
pbn_panacom4,
pbn_plx_romulus,
- pbn_endrun_2_4000000,
+ pbn_endrun_2_3906250,
pbn_oxsemi,
pbn_oxsemi_1_3906250,
pbn_oxsemi_2_3906250,
@@ -3195,10 +3195,10 @@ static struct pciserial_board pci_boards[] = {
* signal now many ports are available
* 2 port 952 Uart support
*/
- [pbn_endrun_2_4000000] = {
+ [pbn_endrun_2_3906250] = {
.flags = FL_BASE0,
.num_ports = 2,
- .base_baud = 4000000,
+ .base_baud = 3906250,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
@@ -4115,7 +4115,7 @@ static const struct pci_device_id serial_pci_tbl[] = {
*/
{ PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_endrun_2_4000000 },
+ pbn_endrun_2_3906250 },
/*
* Quatech cards. These actually have configurable clocks but for
* now we just use the default.
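
The renamed entry encodes the fix: base_baud in the board table is the UART input clock after the 8250's fixed divide-by-16, and the EndRun part evidently shares the 62.5 MHz oscillator of the neighbouring pbn_oxsemi_*_3906250 boards rather than a 64 MHz one. The arithmetic, as a trivial standalone check (clock value assumed from the oxsemi naming):

#include <stdio.h>

int main(void)
{
	unsigned int uartclk = 62500000;	/* 62.5 MHz oscillator */

	/* 8250 core: max baud = clk / 16 -> stored as base_baud */
	printf("base_baud = %u\n", uartclk / 16);	/* 3906250 */
	return 0;
}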
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 318af6f13605..1fbd5bf264be 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1675,11 +1675,11 @@ static void serial8250_start_tx(struct uart_port *port)
struct uart_8250_port *up = up_to_u8250p(port);
struct uart_8250_em485 *em485 = up->em485;
- serial8250_rpm_get_tx(up);
-
if (!port->x_char && uart_circ_empty(&port->state->xmit))
return;
+ serial8250_rpm_get_tx(up);
+
if (em485 &&
em485->active_timer == &em485->start_tx_timer)
return;
@@ -3329,7 +3329,7 @@ static void serial8250_console_restore(struct uart_8250_port *up)
serial8250_set_divisor(port, baud, quot, frac);
serial_port_out(port, UART_LCR, up->lcr);
- serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
+ serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
}
/*
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 51ecb050ae40..4d11a3e547f9 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1255,13 +1255,18 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
{
+ /*
+ * To be on the safe side only time out after twice as many iterations
+ * as fifo size.
+ */
+ const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2;
struct uart_port *port = &uap->port;
int i = 0;
u32 cr;
/* Wait until hardware tx queue is empty */
while (!pl011_tx_empty(port)) {
- if (i == port->fifosize) {
+ if (i > MAX_TX_DRAIN_ITERS) {
dev_warn(port->dev,
"timeout while draining hardware tx queue\n");
break;
@@ -2052,7 +2057,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
* with the given baud rate. We use this as the poll interval when we
* wait for the tx queue to empty.
*/
- uap->rs485_tx_drain_interval = (bits * 1000 * 1000) / baud;
+ uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud);
pl011_setup_status_masks(port, termios);
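
The two pl011 hunks work together: the drain loop now budgets twice as many polls as FIFO entries, and the per-character poll interval is rounded up instead of truncated. With truncation, fifosize polls add up to slightly less than the real drain time, so the loop could warn before the FIFO actually empties. A standalone illustration with a 10-bit character frame at 9600 baud:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int bits = 10, baud = 9600, fifosize = 32;
	unsigned int down = bits * 1000 * 1000 / baud;		  /* 1041 us */
	unsigned int up = DIV_ROUND_UP(bits * 1000 * 1000, baud); /* 1042 us */

	/* 32 characters really need ~33334 us on the wire */
	printf("old budget: %u us\n", fifosize * down);		/* 33312 */
	printf("new budget: %u us\n", 2 * fifosize * up);	/* 66688 */
	return 0;
}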
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index 6d70fea76bb3..e37a917b9dbb 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -471,11 +471,10 @@ static int digicolor_uart_probe(struct platform_device *pdev)
if (IS_ERR(uart_clk))
return PTR_ERR(uart_clk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dp->port.mapbase = res->start;
- dp->port.membase = devm_ioremap_resource(&pdev->dev, res);
+ dp->port.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(dp->port.membase))
return PTR_ERR(dp->port.membase);
+ dp->port.mapbase = res->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
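
Besides the cleanup, the digicolor conversion fixes an ordering bug: the old code read res->start before devm_ioremap_resource() had validated the resource. The helper fetches and maps the MEM resource in one call, so the probe only touches res after the mapping is known good. A kernel-style sketch of the idiom (hypothetical example_probe()):

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* res not consumed on failure */

	/* safe: the resource existed, or the mapping would have failed */
	dev_info(&pdev->dev, "mapped %pR\n", res);
	return 0;
}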
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 87789872f400..be12fee94db5 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -2664,6 +2664,7 @@ static int lpuart_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct lpuart_port *sport;
struct resource *res;
+ irq_handler_t handler;
int ret;
sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
@@ -2741,17 +2742,11 @@ static int lpuart_probe(struct platform_device *pdev)
if (lpuart_is_32(sport)) {
lpuart_reg.cons = LPUART32_CONSOLE;
- ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0,
- DRIVER_NAME, sport);
+ handler = lpuart32_int;
} else {
lpuart_reg.cons = LPUART_CONSOLE;
- ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0,
- DRIVER_NAME, sport);
+ handler = lpuart_int;
}
-
- if (ret)
- goto failed_irq_request;
-
ret = uart_add_one_port(&lpuart_reg, &sport->port);
if (ret)
goto failed_attach_port;
@@ -2773,13 +2768,18 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.rs485_config(&sport->port, &sport->port.rs485);
+ ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0,
+ DRIVER_NAME, sport);
+ if (ret)
+ goto failed_irq_request;
+
return 0;
+failed_irq_request:
failed_get_rs485:
failed_reset:
uart_remove_one_port(&lpuart_reg, &sport->port);
failed_attach_port:
-failed_irq_request:
lpuart_disable_clks(sport);
failed_clock_enable:
failed_out_of_range:
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index fd38e6ed4fda..a2100be8d554 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1448,7 +1448,7 @@ static int imx_uart_startup(struct uart_port *port)
imx_uart_writel(sport, ucr1, UCR1);
ucr4 = imx_uart_readl(sport, UCR4) & ~(UCR4_OREN | UCR4_INVR);
- if (!sport->dma_is_enabled)
+ if (!dma_is_inited)
ucr4 |= UCR4_OREN;
if (sport->inverted_rx)
ucr4 |= UCR4_INVR;
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 8a6958377764..3acc0f185762 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -436,31 +436,31 @@ static void mpc512x_psc_fifo_init(struct uart_port *port)
out_be32(&FIFO_512x(port)->rximr, MPC512x_PSC_FIFO_ALARM);
}
-static int mpc512x_psc_raw_rx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_raw_rx_rdy(struct uart_port *port)
{
return !(in_be32(&FIFO_512x(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
}
-static int mpc512x_psc_raw_tx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_raw_tx_rdy(struct uart_port *port)
{
return !(in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_FULL);
}
-static int mpc512x_psc_rx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_rx_rdy(struct uart_port *port)
{
return in_be32(&FIFO_512x(port)->rxsr)
& in_be32(&FIFO_512x(port)->rximr)
& MPC512x_PSC_FIFO_ALARM;
}
-static int mpc512x_psc_tx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_tx_rdy(struct uart_port *port)
{
return in_be32(&FIFO_512x(port)->txsr)
& in_be32(&FIFO_512x(port)->tximr)
& MPC512x_PSC_FIFO_ALARM;
}
-static int mpc512x_psc_tx_empty(struct uart_port *port)
+static unsigned int mpc512x_psc_tx_empty(struct uart_port *port)
{
return in_be32(&FIFO_512x(port)->txsr)
& MPC512x_PSC_FIFO_EMPTY;
@@ -780,29 +780,29 @@ static void mpc5125_psc_fifo_init(struct uart_port *port)
out_be32(&FIFO_5125(port)->rximr, MPC512x_PSC_FIFO_ALARM);
}
-static int mpc5125_psc_raw_rx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_raw_rx_rdy(struct uart_port *port)
{
return !(in_be32(&FIFO_5125(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
}
-static int mpc5125_psc_raw_tx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_raw_tx_rdy(struct uart_port *port)
{
return !(in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_FULL);
}
-static int mpc5125_psc_rx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_rx_rdy(struct uart_port *port)
{
return in_be32(&FIFO_5125(port)->rxsr) &
in_be32(&FIFO_5125(port)->rximr) & MPC512x_PSC_FIFO_ALARM;
}
-static int mpc5125_psc_tx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_tx_rdy(struct uart_port *port)
{
return in_be32(&FIFO_5125(port)->txsr) &
in_be32(&FIFO_5125(port)->tximr) & MPC512x_PSC_FIFO_ALARM;
}
-static int mpc5125_psc_tx_empty(struct uart_port *port)
+static unsigned int mpc5125_psc_tx_empty(struct uart_port *port)
{
return in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_EMPTY;
}
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index e857fb61efbf..5fb201c1b563 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1238,12 +1238,10 @@ static void sc16is7xx_shutdown(struct uart_port *port)
/* Disable all interrupts */
sc16is7xx_port_write(port, SC16IS7XX_IER_REG, 0);
- /* Disable TX/RX, clear auto RS485 and RTS invert */
+ /* Disable TX/RX */
sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG,
SC16IS7XX_EFCR_RXDISABLE_BIT |
- SC16IS7XX_EFCR_TXDISABLE_BIT |
- SC16IS7XX_EFCR_AUTO_RS485_BIT |
- SC16IS7XX_EFCR_RTS_INVERT_BIT,
+ SC16IS7XX_EFCR_TXDISABLE_BIT,
SC16IS7XX_EFCR_RXDISABLE_BIT |
SC16IS7XX_EFCR_TXDISABLE_BIT);
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index f9af7ebe003d..d6d515d598dc 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -2684,6 +2684,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
struct usb_request *request;
struct cdns3_request *priv_req;
struct cdns3_trb *trb = NULL;
+ struct cdns3_trb trb_tmp;
int ret;
int val;
@@ -2693,8 +2694,10 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
if (request) {
priv_req = to_cdns3_request(request);
trb = priv_req->trb;
- if (trb)
+ if (trb) {
+ trb_tmp = *trb;
trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
+ }
}
writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
@@ -2709,7 +2712,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
if (request) {
if (trb)
- trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
+ *trb = trb_tmp;
cdns3_rearm_transfer(priv_ep, 1);
}
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 7f2c83f299d3..eebe782380fb 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -774,6 +774,7 @@ static int wdm_release(struct inode *inode, struct file *file)
poison_urbs(desc);
spin_lock_irq(&desc->iuspin);
desc->resp_count = 0;
+ clear_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
desc->manage_power(desc->intf, 0);
unpoison_urbs(desc);
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6abb7294e919..b5b85bf80329 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1209,12 +1209,16 @@ static int do_proc_control(struct usb_dev_state *ps,
usb_unlock_device(dev);
i = usbfs_start_wait_urb(urb, tmo, &actlen);
+
+ /* Linger a bit, prior to the next control message. */
+ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
+ msleep(200);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, tbuf, actlen);
if (!i && actlen) {
if (copy_to_user(ctrl->data, tbuf, actlen)) {
ret = -EFAULT;
- goto recv_fault;
+ goto done;
}
}
} else {
@@ -1231,6 +1235,10 @@ static int do_proc_control(struct usb_dev_state *ps,
usb_unlock_device(dev);
i = usbfs_start_wait_urb(urb, tmo, &actlen);
+
+ /* Linger a bit, prior to the next control message. */
+ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
+ msleep(200);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, NULL, 0);
}
@@ -1242,10 +1250,6 @@ static int do_proc_control(struct usb_dev_state *ps,
}
ret = (i < 0 ? i : actlen);
- recv_fault:
- /* Linger a bit, prior to the next control message. */
- if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
- msleep(200);
done:
kfree(dr);
usb_free_urb(urb);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d3c14b5ed4a1..97b44a68668a 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -404,6 +404,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* Realtek Semiconductor Corp. Mass Storage Device (Multicard Reader) */
+ { USB_DEVICE(0x0bda, 0x0151), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Realtek hub in Dell WD19 (Type-C) */
{ USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
@@ -507,6 +510,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ /* VCOM device */
+ { USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 1170b800acdc..d28cd1a6709b 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -274,7 +274,8 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg |= DWC3_DCTL_CSFTRST;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ reg &= ~DWC3_DCTL_RUN_STOP;
+ dwc3_gadget_dctl_write_safe(dwc, reg);
/*
* For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit
@@ -1377,10 +1378,10 @@ static void dwc3_get_properties(struct dwc3 *dwc)
u8 lpm_nyet_threshold;
u8 tx_de_emphasis;
u8 hird_threshold;
- u8 rx_thr_num_pkt_prd;
- u8 rx_max_burst_prd;
- u8 tx_thr_num_pkt_prd;
- u8 tx_max_burst_prd;
+ u8 rx_thr_num_pkt_prd = 0;
+ u8 rx_max_burst_prd = 0;
+ u8 tx_thr_num_pkt_prd = 0;
+ u8 tx_max_burst_prd = 0;
u8 tx_fifo_resize_max_num;
const char *usb_psy_name;
int ret;
@@ -1690,21 +1691,44 @@ static int dwc3_probe(struct platform_device *pdev)
/*
* Clocks are optional, but new DT platforms should support all
* clocks as required by the DT-binding.
+ * Some devices have different clock names in legacy device trees,
+ * check for them to retain backwards compatibility.
*/
dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
if (IS_ERR(dwc->bus_clk))
return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
"could not get bus clock\n");
+ if (dwc->bus_clk == NULL) {
+ dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
+ if (IS_ERR(dwc->bus_clk))
+ return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
+ "could not get bus clock\n");
+ }
+
dwc->ref_clk = devm_clk_get_optional(dev, "ref");
if (IS_ERR(dwc->ref_clk))
return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
"could not get ref clock\n");
+ if (dwc->ref_clk == NULL) {
+ dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
+ if (IS_ERR(dwc->ref_clk))
+ return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
+ "could not get ref clock\n");
+ }
+
dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
if (IS_ERR(dwc->susp_clk))
return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
"could not get suspend clock\n");
+
+ if (dwc->susp_clk == NULL) {
+ dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
+ if (IS_ERR(dwc->susp_clk))
+ return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
+ "could not get suspend clock\n");
+ }
}
ret = reset_control_deassert(dwc->reset);
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index b60b5f7b6dff..8cad9e7d3368 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -584,16 +584,15 @@ int dwc3_drd_init(struct dwc3 *dwc)
{
int ret, irq;
+ if (ROLE_SWITCH &&
+ device_property_read_bool(dwc->dev, "usb-role-switch"))
+ return dwc3_setup_role_switch(dwc);
+
dwc->edev = dwc3_get_extcon(dwc);
if (IS_ERR(dwc->edev))
return PTR_ERR(dwc->edev);
- if (ROLE_SWITCH &&
- device_property_read_bool(dwc->dev, "usb-role-switch")) {
- ret = dwc3_setup_role_switch(dwc);
- if (ret < 0)
- return ret;
- } else if (dwc->edev) {
+ if (dwc->edev) {
dwc->edev_nb.notifier_call = dwc3_drd_notifier;
ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
&dwc->edev_nb);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 33f657d83246..2e19e0e4ea53 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -45,6 +45,8 @@
#define PCI_DEVICE_ID_INTEL_ADLM 0x54ee
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
+#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
+#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_DEVICE_ID_AMD_MR 0x163a
@@ -456,6 +458,12 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index ab725d2262d6..0b9c2493844a 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -3274,6 +3274,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event,
struct dwc3_request *req, int status)
{
+ int request_status;
int ret;
if (req->request.num_mapped_sgs)
@@ -3294,7 +3295,35 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
req->needs_extra_trb = false;
}
- dwc3_gadget_giveback(dep, req, status);
+ /*
+ * The event status only reflects the status of the TRB with IOC set.
+ * For the requests that don't set interrupt on completion, the driver
+ * needs to check and return the status of the completed TRBs associated
+ * with the request. Use the status of the last TRB of the request.
+ */
+ if (req->request.no_interrupt) {
+ struct dwc3_trb *trb;
+
+ trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue);
+ switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) {
+ case DWC3_TRBSTS_MISSED_ISOC:
+ /* Isoc endpoint only */
+ request_status = -EXDEV;
+ break;
+ case DWC3_TRB_STS_XFER_IN_PROG:
+ /* Applicable when End Transfer with ForceRM=0 */
+ case DWC3_TRBSTS_SETUP_PENDING:
+ /* Control endpoint only */
+ case DWC3_TRBSTS_OK:
+ default:
+ request_status = 0;
+ break;
+ }
+ } else {
+ request_status = status;
+ }
+
+ dwc3_gadget_giveback(dep, req, request_status);
out:
return ret;
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 1fb837d9271e..84b73cb03f87 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1438,6 +1438,8 @@ static void configfs_composite_unbind(struct usb_gadget *gadget)
usb_ep_autoconfig_reset(cdev->gadget);
spin_lock_irqsave(&gi->spinlock, flags);
cdev->gadget = NULL;
+ cdev->deactivations = 0;
+ gadget->deactivated = false;
set_gadget_data(gadget, NULL);
spin_unlock_irqrestore(&gi->spinlock, flags);
}
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 71bb5e477dba..d37965867b23 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -890,13 +890,37 @@ static void uvc_function_unbind(struct usb_configuration *c,
{
struct usb_composite_dev *cdev = c->cdev;
struct uvc_device *uvc = to_uvc(f);
+ long wait_ret = 1;
uvcg_info(f, "%s()\n", __func__);
+ /* If we know we're connected via v4l2, then there should be a cleanup
+ * of the device from userspace either via UVC_EVENT_DISCONNECT or
* through the video device removal uevent. Allow some time for the
+ * application to close out before things get deleted.
+ */
+ if (uvc->func_connected) {
+ uvcg_dbg(f, "waiting for clean disconnect\n");
+ wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
+ uvc->func_connected == false, msecs_to_jiffies(500));
+ uvcg_dbg(f, "done waiting with ret: %ld\n", wait_ret);
+ }
+
device_remove_file(&uvc->vdev.dev, &dev_attr_function_name);
video_unregister_device(&uvc->vdev);
v4l2_device_unregister(&uvc->v4l2_dev);
+ if (uvc->func_connected) {
+ /* Wait for the release to occur to ensure there are no longer any
+ * pending operations that may cause panics when resources are cleaned
+ * up.
+ */
+ uvcg_warn(f, "%s no clean disconnect, wait for release\n", __func__);
+ wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
+ uvc->func_connected == false, msecs_to_jiffies(1000));
+ uvcg_dbg(f, "done waiting for release with ret: %ld\n", wait_ret);
+ }
+
usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
kfree(uvc->control_buf);
@@ -915,6 +939,7 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
mutex_init(&uvc->video.mutex);
uvc->state = UVC_STATE_DISCONNECTED;
+ init_waitqueue_head(&uvc->func_connected_queue);
opts = fi_to_f_uvc_opts(fi);
mutex_lock(&opts->lock);
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index c3607a32b986..886103a1fe9b 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/usb/composite.h>
#include <linux/videodev2.h>
+#include <linux/wait.h>
#include <media/v4l2-device.h>
#include <media/v4l2-dev.h>
@@ -129,6 +130,7 @@ struct uvc_device {
struct usb_function func;
struct uvc_video video;
bool func_connected;
+ wait_queue_head_t func_connected_queue;
/* Descriptors */
struct {
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index d852ac9e47e7..2cda982f3765 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -264,6 +264,8 @@ void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect)
buf->state = UVC_BUF_STATE_ERROR;
vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
}
+ queue->buf_used = 0;
+
/* This must be protected by the irqlock spinlock to avoid race
* conditions between uvc_queue_buffer and the disconnection event that
* could result in an interruptible wait in uvc_dequeue_buffer. Do not
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index a2c78690c5c2..fd8f73bb726d 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -253,10 +253,11 @@ uvc_v4l2_subscribe_event(struct v4l2_fh *fh,
static void uvc_v4l2_disable(struct uvc_device *uvc)
{
- uvc->func_connected = false;
uvc_function_disconnect(uvc);
uvcg_video_enable(&uvc->video, 0);
uvcg_free_buffers(&uvc->video.queue);
+ uvc->func_connected = false;
+ wake_up_interruptible(&uvc->func_connected_queue);
}
static int
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index 8d40a1f2ec57..e9440f7bf019 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -145,6 +145,7 @@ enum dev_state {
STATE_DEV_INVALID = 0,
STATE_DEV_OPENED,
STATE_DEV_INITIALIZED,
+ STATE_DEV_REGISTERING,
STATE_DEV_RUNNING,
STATE_DEV_CLOSED,
STATE_DEV_FAILED
@@ -508,6 +509,7 @@ static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
ret = -EINVAL;
goto out_unlock;
}
+ dev->state = STATE_DEV_REGISTERING;
spin_unlock_irqrestore(&dev->lock, flags);
ret = usb_gadget_probe_driver(&dev->driver);
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 3d82e0b853be..684164fa9716 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1103,6 +1103,26 @@ static void ehci_remove_device(struct usb_hcd *hcd, struct usb_device *udev)
#ifdef CONFIG_PM
+/* Clear the wakeup signal that gets latched on the Zhaoxin platform when a device is plugged in. */
+static void ehci_zx_wakeup_clear(struct ehci_hcd *ehci)
+{
+ u32 __iomem *reg = &ehci->regs->port_status[4];
+ u32 t1 = ehci_readl(ehci, reg);
+
+ t1 &= (u32)~0xf0000;
+ t1 |= PORT_TEST_FORCE;
+ ehci_writel(ehci, t1, reg);
+ t1 = ehci_readl(ehci, reg);
+ msleep(1);
+ t1 &= (u32)~0xf0000;
+ ehci_writel(ehci, t1, reg);
+ ehci_readl(ehci, reg);
+ msleep(1);
+ t1 = ehci_readl(ehci, reg);
+ ehci_writel(ehci, t1 | PORT_CSC, reg);
+ ehci_readl(ehci, reg);
+}
+
/* suspend/resume, section 4.3 */
/* These routines handle the generic parts of controller suspend/resume */
@@ -1154,6 +1174,9 @@ int ehci_resume(struct usb_hcd *hcd, bool force_reset)
if (ehci->shutdown)
return 0; /* Controller is dead */
+ if (ehci->zx_wakeup_clear_needed)
+ ehci_zx_wakeup_clear(ehci);
+
/*
* If CF is still set and reset isn't forced
* then we maintained suspend power.
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 638f03b89739..9937c5a7efc2 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -231,6 +231,10 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
ehci->is_aspeed = 1;
}
break;
+ case PCI_VENDOR_ID_ZHAOXIN:
+ if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x90)
+ ehci->zx_wakeup_clear_needed = 1;
+ break;
}
/* optional debug port, normally in the first BAR */
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index fdd073cc053b..ad3f13a3eaf1 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -220,6 +220,7 @@ struct ehci_hcd { /* one per controller */
unsigned imx28_write_fix:1; /* For Freescale i.MX28 */
unsigned spurious_oc:1;
unsigned is_aspeed:1;
+ unsigned zx_wakeup_clear_needed:1;
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 1e7dc130c39a..f65f1ba2b592 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1434,7 +1434,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
spin_unlock_irqrestore(&xhci->lock, flags);
if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex],
- msecs_to_jiffies(100)))
+ msecs_to_jiffies(500)))
xhci_dbg(xhci, "missing U0 port change event for port %d-%d\n",
hcd->self.busnum, wIndex + 1);
spin_lock_irqsave(&xhci->lock, flags);
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index f3139ce7b0a9..06a6b19acaae 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -19,11 +19,6 @@
#define HS_BW_BOUNDARY 6144
/* usb2 spec section11.18.1: at most 188 FS bytes per microframe */
#define FS_PAYLOAD_MAX 188
-/*
- * max number of microframes for split transfer,
- * for fs isoc in : 1 ss + 1 idle + 7 cs
- */
-#define TT_MICROFRAMES_MAX 9
#define DBG_BUF_EN 64
@@ -242,28 +237,17 @@ static void drop_tt(struct usb_device *udev)
static struct mu3h_sch_ep_info *
create_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
- struct usb_host_endpoint *ep, struct xhci_ep_ctx *ep_ctx)
+ struct usb_host_endpoint *ep)
{
struct mu3h_sch_ep_info *sch_ep;
struct mu3h_sch_bw_info *bw_info;
struct mu3h_sch_tt *tt = NULL;
- u32 len_bw_budget_table;
bw_info = get_bw_info(mtk, udev, ep);
if (!bw_info)
return ERR_PTR(-ENODEV);
- if (is_fs_or_ls(udev->speed))
- len_bw_budget_table = TT_MICROFRAMES_MAX;
- else if ((udev->speed >= USB_SPEED_SUPER)
- && usb_endpoint_xfer_isoc(&ep->desc))
- len_bw_budget_table = get_esit(ep_ctx);
- else
- len_bw_budget_table = 1;
-
- sch_ep = kzalloc(struct_size(sch_ep, bw_budget_table,
- len_bw_budget_table),
- GFP_KERNEL);
+ sch_ep = kzalloc(sizeof(*sch_ep), GFP_KERNEL);
if (!sch_ep)
return ERR_PTR(-ENOMEM);
@@ -295,8 +279,6 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
u32 mult;
u32 esit_pkts;
u32 max_esit_payload;
- u32 *bwb_table = sch_ep->bw_budget_table;
- int i;
ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
maxpkt = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
@@ -332,7 +314,6 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
*/
sch_ep->pkts = max_burst + 1;
sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
- bwb_table[0] = sch_ep->bw_cost_per_microframe;
} else if (sch_ep->speed >= USB_SPEED_SUPER) {
/* usb3_r1 spec section4.4.7 & 4.4.8 */
sch_ep->cs_count = 0;
@@ -349,7 +330,6 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
sch_ep->pkts = esit_pkts;
sch_ep->num_budget_microframes = 1;
- bwb_table[0] = maxpkt * sch_ep->pkts;
}
if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) {
@@ -366,15 +346,8 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
DIV_ROUND_UP(esit_pkts, sch_ep->pkts);
sch_ep->repeat = !!(sch_ep->num_budget_microframes > 1);
- sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
-
- for (i = 0; i < sch_ep->num_budget_microframes - 1; i++)
- bwb_table[i] = sch_ep->bw_cost_per_microframe;
-
- /* last one <= bw_cost_per_microframe */
- bwb_table[i] = maxpkt * esit_pkts
- - i * sch_ep->bw_cost_per_microframe;
}
+ sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
} else if (is_fs_or_ls(sch_ep->speed)) {
sch_ep->pkts = 1; /* at most one packet for each microframe */
@@ -384,28 +357,7 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
*/
sch_ep->cs_count = DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX);
sch_ep->num_budget_microframes = sch_ep->cs_count;
- sch_ep->bw_cost_per_microframe =
- (maxpkt < FS_PAYLOAD_MAX) ? maxpkt : FS_PAYLOAD_MAX;
-
- /* init budget table */
- if (ep_type == ISOC_OUT_EP) {
- for (i = 0; i < sch_ep->num_budget_microframes; i++)
- bwb_table[i] = sch_ep->bw_cost_per_microframe;
- } else if (ep_type == INT_OUT_EP) {
- /* only first one consumes bandwidth, others as zero */
- bwb_table[0] = sch_ep->bw_cost_per_microframe;
- } else { /* INT_IN_EP or ISOC_IN_EP */
- bwb_table[0] = 0; /* start split */
- bwb_table[1] = 0; /* idle */
- /*
- * due to cs_count will be updated according to cs
- * position, assign all remainder budget array
- * elements as @bw_cost_per_microframe, but only first
- * @num_budget_microframes elements will be used later
- */
- for (i = 2; i < TT_MICROFRAMES_MAX; i++)
- bwb_table[i] = sch_ep->bw_cost_per_microframe;
- }
+ sch_ep->bw_cost_per_microframe = min_t(u32, maxpkt, FS_PAYLOAD_MAX);
}
}
@@ -422,7 +374,7 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
for (j = 0; j < sch_ep->num_budget_microframes; j++) {
k = XHCI_MTK_BW_INDEX(base + j);
- bw = sch_bw->bus_bw[k] + sch_ep->bw_budget_table[j];
+ bw = sch_bw->bus_bw[k] + sch_ep->bw_cost_per_microframe;
if (bw > max_bw)
max_bw = bw;
}
@@ -433,18 +385,16 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
struct mu3h_sch_ep_info *sch_ep, bool used)
{
+ int bw_updated;
u32 base;
- int i, j, k;
+ int i, j;
+
+ bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);
for (i = 0; i < sch_ep->num_esit; i++) {
base = sch_ep->offset + i * sch_ep->esit;
- for (j = 0; j < sch_ep->num_budget_microframes; j++) {
- k = XHCI_MTK_BW_INDEX(base + j);
- if (used)
- sch_bw->bus_bw[k] += sch_ep->bw_budget_table[j];
- else
- sch_bw->bus_bw[k] -= sch_ep->bw_budget_table[j];
- }
+ for (j = 0; j < sch_ep->num_budget_microframes; j++)
+ sch_bw->bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
}
}
@@ -464,7 +414,7 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
*/
for (j = 0; j < sch_ep->num_budget_microframes; j++) {
k = XHCI_MTK_BW_INDEX(base + j);
- tmp = tt->fs_bus_bw[k] + sch_ep->bw_budget_table[j];
+ tmp = tt->fs_bus_bw[k] + sch_ep->bw_cost_per_microframe;
if (tmp > FS_PAYLOAD_MAX)
return -ESCH_BW_OVERFLOW;
}
@@ -538,19 +488,17 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used)
{
struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+ int bw_updated;
u32 base;
- int i, j, k;
+ int i, j;
+
+ bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);
for (i = 0; i < sch_ep->num_esit; i++) {
base = sch_ep->offset + i * sch_ep->esit;
- for (j = 0; j < sch_ep->num_budget_microframes; j++) {
- k = XHCI_MTK_BW_INDEX(base + j);
- if (used)
- tt->fs_bus_bw[k] += sch_ep->bw_budget_table[j];
- else
- tt->fs_bus_bw[k] -= sch_ep->bw_budget_table[j];
- }
+ for (j = 0; j < sch_ep->num_budget_microframes; j++)
+ tt->fs_bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
}
if (used)
@@ -710,7 +658,7 @@ static int add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));
- sch_ep = create_sch_ep(mtk, udev, ep, ep_ctx);
+ sch_ep = create_sch_ep(mtk, udev, ep);
if (IS_ERR_OR_NULL(sch_ep))
return -ENOMEM;
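
With the per-microframe budget table gone, every scheduled microframe carries the same bw_cost_per_microframe, so the allocate and free paths collapse into one loop applying a signed delta, as both update_bus_bw() and update_sch_tt() now do. A distilled kernel-style sketch of that accounting (hypothetical account_bw(); the real code additionally wraps the index with XHCI_MTK_BW_INDEX()):

static void account_bw(u32 *bus_bw, unsigned int base,
		       unsigned int nframes, u32 cost, bool used)
{
	int delta = used ? (int)cost : -(int)cost;
	unsigned int j;

	for (j = 0; j < nframes; j++)
		bus_bw[base + j] += delta;	/* add on use, subtract on free */
}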
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index ffd4b493b4ba..1174a510dd38 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -83,7 +83,6 @@ struct mu3h_sch_bw_info {
* times; 1: distribute the (bMaxBurst+1)*(Mult+1) packets
* according to @pkts and @repeat. normal mode is used by
* default
- * @bw_budget_table: table to record bandwidth budget per microframe
*/
struct mu3h_sch_ep_info {
u32 esit;
@@ -109,7 +108,6 @@ struct mu3h_sch_ep_info {
u32 pkts;
u32 cs_count;
u32 burst_mode;
- u32 bw_budget_table[];
};
#define MU3C_U3_PORT_MAX 4
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5c351970cdf1..d7e0e6ebf080 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -59,6 +59,7 @@
#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
#define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
@@ -266,7 +267,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI))
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d0b6806275e0..f9707997969d 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3141,6 +3141,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
if (event_loop++ < TRBS_PER_SEGMENT / 2)
continue;
xhci_update_erst_dequeue(xhci, event_ring_deq);
+ event_ring_deq = xhci->event_ring->dequeue;
/* ring is half-full, force isoc trbs to interrupt more often */
if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index c8af2cd2216d..996958a6565c 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1034,13 +1034,13 @@ static int tegra_xusb_unpowergate_partitions(struct tegra_xusb *tegra)
int rc;
if (tegra->use_genpd) {
- rc = pm_runtime_get_sync(tegra->genpd_dev_ss);
+ rc = pm_runtime_resume_and_get(tegra->genpd_dev_ss);
if (rc < 0) {
dev_err(dev, "failed to enable XUSB SS partition\n");
return rc;
}
- rc = pm_runtime_get_sync(tegra->genpd_dev_host);
+ rc = pm_runtime_resume_and_get(tegra->genpd_dev_host);
if (rc < 0) {
dev_err(dev, "failed to enable XUSB Host partition\n");
pm_runtime_put_sync(tegra->genpd_dev_ss);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 642610c78f58..25b87e99b4dd 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -781,6 +781,17 @@ void xhci_shutdown(struct usb_hcd *hcd)
if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
+ /* Don't poll the roothubs after shutdown. */
+ xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
+ __func__, hcd->self.busnum);
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ del_timer_sync(&hcd->rh_timer);
+
+ if (xhci->shared_hcd) {
+ clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ del_timer_sync(&xhci->shared_hcd->rh_timer);
+ }
+
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
/* Workaround for spurious wakeups at shutdown with HSW */
diff --git a/drivers/usb/misc/qcom_eud.c b/drivers/usb/misc/qcom_eud.c
index f929bffdc5d1..b7f13df00764 100644
--- a/drivers/usb/misc/qcom_eud.c
+++ b/drivers/usb/misc/qcom_eud.c
@@ -186,16 +186,16 @@ static int eud_probe(struct platform_device *pdev)
chip->dev = &pdev->dev;
- ret = devm_add_action_or_reset(chip->dev, eud_role_switch_release, chip);
- if (ret)
- return dev_err_probe(chip->dev, ret,
- "failed to add role switch release action\n");
-
chip->role_sw = usb_role_switch_get(&pdev->dev);
if (IS_ERR(chip->role_sw))
return dev_err_probe(chip->dev, PTR_ERR(chip->role_sw),
"failed to get role switch\n");
+ ret = devm_add_action_or_reset(chip->dev, eud_role_switch_release, chip);
+ if (ret)
+ return dev_err_probe(chip->dev, ret,
+ "failed to add role switch release action\n");
+
chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 748139d26263..0be8efcda15d 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -71,6 +71,7 @@ static void destroy_priv(struct kref *kref)
dev_dbg(&priv->usbdev->dev, "destroying priv datastructure\n");
usb_put_dev(priv->usbdev);
+ priv->usbdev = NULL;
kfree(priv);
}
@@ -736,7 +737,6 @@ static int uss720_probe(struct usb_interface *intf,
parport_announce_port(pp);
usb_set_intfdata(intf, pp);
- usb_put_dev(usbdev);
return 0;
probe_abort:
@@ -754,7 +754,6 @@ static void uss720_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (pp) {
priv = pp->private_data;
- priv->usbdev = NULL;
priv->pp = NULL;
dev_dbg(&intf->dev, "parport_remove_port\n");
parport_remove_port(pp);
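Taken together, the three uss720 hunks move ownership of the usb_device reference: probe no longer drops it early, disconnect no longer clears priv->usbdev behind other users' backs, and only the kref release function puts the device and frees priv. A sketch of the release-owns-the-reference pattern (type and field names are illustrative, mirroring the hunks above):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/usb.h>

struct priv_like {
	struct usb_device *usbdev;	/* holds one reference */
	struct kref ref_count;
};

static void destroy_priv(struct kref *kref)
{
	struct priv_like *priv =
		container_of(kref, struct priv_like, ref_count);

	usb_put_dev(priv->usbdev);	/* the only place it is put */
	priv->usbdev = NULL;
	kfree(priv);
}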
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
index a6b04831b20b..9b8aded3d95e 100644
--- a/drivers/usb/mtu3/mtu3_dr.c
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -21,10 +21,8 @@ static inline struct ssusb_mtk *otg_sx_to_ssusb(struct otg_switch_mtk *otg_sx)
static void toggle_opstate(struct ssusb_mtk *ssusb)
{
- if (!ssusb->otg_switch.is_u3_drd) {
- mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION);
- mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN);
- }
+ mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION);
+ mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN);
}
/* only port0 supports dual-role mode */
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 661a229c105d..34b9f8140187 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -268,6 +268,13 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
return -EPROBE_DEFER;
}
+ nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");
+ if (PTR_ERR(nop->vbus_draw) == -ENODEV)
+ nop->vbus_draw = NULL;
+ if (IS_ERR(nop->vbus_draw))
+ return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),
+ "could not get vbus regulator\n");
+
nop->dev = dev;
nop->phy.dev = nop->dev;
nop->phy.label = "nop-xceiv";
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index a27f7efcec6a..c374620a486f 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -194,6 +194,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
{ USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */
{ USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */
+ { USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */
+ { USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */
{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e7755d9cfc61..152ad882657d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -432,6 +432,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_CLS8 0x00b0
#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
+#define CINTERION_PRODUCT_MV32_WA 0x00f1
+#define CINTERION_PRODUCT_MV32_WB 0x00f2
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -1217,6 +1219,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */
.driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1057, 0xff), /* Telit FN980 */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1058, 0xff), /* Telit FN980 (PCIe) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 (MBIM) */
@@ -1233,6 +1239,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */
+ .driver_info = RSVD(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1969,6 +1977,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
.driver_info = RSVD(0)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
+ .driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
+ .driver_info = RSVD(3)},
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
@@ -2111,10 +2123,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0106, 0xff) }, /* Fibocom MA510 (ECM mode w/ diag intf.) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) }, /* Fibocom MA510 (ECM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 88b284d61681..1d878d05a658 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -106,6 +106,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LM930_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index c5406452b774..732f9b13ad5d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -135,6 +135,7 @@
#define HP_TD620_PRODUCT_ID 0x0956
#define HP_LD960_PRODUCT_ID 0x0b39
#define HP_LD381_PRODUCT_ID 0x0f7f
+#define HP_LM930_PRODUCT_ID 0x0f9b
#define HP_LCM220_PRODUCT_ID 0x3139
#define HP_LCM960_PRODUCT_ID 0x3239
#define HP_LD220_PRODUCT_ID 0x3524
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index c18bf8164bc2..586ef5551e76 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
{DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
{DEVICE_SWI(0x1199, 0x90d2)}, /* Sierra Wireless EM9191 QDL */
+ {DEVICE_SWI(0x1199, 0xc080)}, /* Sierra Wireless EM7590 QDL */
+ {DEVICE_SWI(0x1199, 0xc081)}, /* Sierra Wireless EM7590 */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index da65d14c9ed5..06aad0d727dd 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -584,9 +584,8 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command,
switch (command) {
case WHITEHEAT_GET_DTR_RTS:
info = usb_get_serial_port_data(port);
- memcpy(&info->mcr, command_info->result_buffer,
- sizeof(struct whiteheat_dr_info));
- break;
+ info->mcr = command_info->result_buffer[0];
+ break;
}
}
exit:
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 8f921213b17d..ba24847fb245 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -56,6 +56,7 @@ config TYPEC_RT1719
tristate "Richtek RT1719 Sink Only Type-C controller driver"
depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
depends on I2C
+ depends on POWER_SUPPLY
select REGMAP_I2C
help
Say Y or M here if your system has Richtek RT1719 sink only
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index e07d26a3cd8e..f33e08eb7670 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -877,7 +877,7 @@ static int tcpci_remove(struct i2c_client *client)
/* Disable chip interrupts before unregistering port */
err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0);
if (err < 0)
- return err;
+ dev_warn(&client->dev, "Failed to disable irqs (%pe)\n", ERR_PTR(err));
tcpci_unregister_port(chip->tcpci);
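A remove() callback has to finish tearing the device down no matter what; the early return removed above would have leaked the registered port, and the I2C core ignores the return value anyway. An abridged sketch of the fixed path (tcpci_write16() and TCPC_ALERT_MASK come from the driver-local tcpci.h):

static int tcpci_remove_sketch(struct i2c_client *client)
{
	struct tcpci_chip *chip = i2c_get_clientdata(client);
	int err;

	/* Best effort: log the failure, never skip the teardown. */
	err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0);
	if (err < 0)
		dev_warn(&client->dev, "Failed to disable irqs (%pe)\n",
			 ERR_PTR(err));

	tcpci_unregister_port(chip->tcpci);
	return 0;
}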
diff --git a/drivers/usb/typec/tcpm/tcpci_mt6360.c b/drivers/usb/typec/tcpm/tcpci_mt6360.c
index f1bd9e09bc87..8a952eaf9016 100644
--- a/drivers/usb/typec/tcpm/tcpci_mt6360.c
+++ b/drivers/usb/typec/tcpm/tcpci_mt6360.c
@@ -15,6 +15,9 @@
#include "tcpci.h"
+#define MT6360_REG_PHYCTRL1 0x80
+#define MT6360_REG_PHYCTRL3 0x82
+#define MT6360_REG_PHYCTRL7 0x86
#define MT6360_REG_VCONNCTRL1 0x8C
#define MT6360_REG_MODECTRL2 0x8F
#define MT6360_REG_SWRESET 0xA0
@@ -22,6 +25,8 @@
#define MT6360_REG_DRPCTRL1 0xA2
#define MT6360_REG_DRPCTRL2 0xA3
#define MT6360_REG_I2CTORST 0xBF
+#define MT6360_REG_PHYCTRL11 0xCA
+#define MT6360_REG_RXCTRL1 0xCE
#define MT6360_REG_RXCTRL2 0xCF
#define MT6360_REG_CTDCTRL2 0xEC
@@ -106,6 +111,27 @@ static int mt6360_tcpc_init(struct tcpci *tcpci, struct tcpci_data *tdata)
if (ret)
return ret;
+ /* BMC PHY */
+ ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL1, 0x3A70);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, MT6360_REG_PHYCTRL3, 0x82);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, MT6360_REG_PHYCTRL7, 0x36);
+ if (ret)
+ return ret;
+
+ ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL11, 0x3C60);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, MT6360_REG_RXCTRL1, 0xE8);
+ if (ret)
+ return ret;
+
/* Set shipping mode off, AUTOIDLE on */
return regmap_write(regmap, MT6360_REG_MODECTRL2, 0x7A);
}
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index f0c2fa19f3e0..a6045aef0d04 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -949,6 +949,8 @@ static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role)
role == TYPEC_HOST))
goto out_unlock;
+ reinit_completion(&con->complete);
+
command = UCSI_SET_UOR | UCSI_CONNECTOR_NUMBER(con->num);
command |= UCSI_SET_UOR_ROLE(role);
command |= UCSI_SET_UOR_ACCEPT_ROLE_SWAPS;
@@ -956,14 +958,18 @@ static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role)
if (ret < 0)
goto out_unlock;
+ mutex_unlock(&con->lock);
+
if (!wait_for_completion_timeout(&con->complete,
- msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
- ret = -ETIMEDOUT;
+ msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
+ return -ETIMEDOUT;
+
+ return 0;
out_unlock:
mutex_unlock(&con->lock);
- return ret < 0 ? ret : 0;
+ return ret;
}
static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
@@ -985,6 +991,8 @@ static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
if (cur_role == role)
goto out_unlock;
+ reinit_completion(&con->complete);
+
command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num);
command |= UCSI_SET_PDR_ROLE(role);
command |= UCSI_SET_PDR_ACCEPT_ROLE_SWAPS;
@@ -992,11 +1000,13 @@ static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
if (ret < 0)
goto out_unlock;
+ mutex_unlock(&con->lock);
+
if (!wait_for_completion_timeout(&con->complete,
- msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) {
- ret = -ETIMEDOUT;
- goto out_unlock;
- }
+ msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
+ return -ETIMEDOUT;
+
+ mutex_lock(&con->lock);
/* Something has gone wrong while swapping the role */
if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) !=
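Both ucsi swap paths used to sleep in wait_for_completion_timeout() with con->lock held, while the event path that calls complete() also needs con->lock, so the wait could only ever time out. The fix arms the completion under the lock, drops the lock for the wait, and retakes it for the post-swap checks. The shape, as a generic sketch (the send callback and names are placeholders):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>

#define SWAP_TIMEOUT_MS	5000

/* 'lock' is also taken by the path that calls complete(done). */
static int issue_and_wait(struct mutex *lock, struct completion *done,
			  int (*send)(void *), void *cmd)
{
	int ret;

	mutex_lock(lock);
	reinit_completion(done);	/* arm before sending */
	ret = send(cmd);
	mutex_unlock(lock);		/* let the completer take the lock */
	if (ret < 0)
		return ret;

	if (!wait_for_completion_timeout(done,
					 msecs_to_jiffies(SWAP_TIMEOUT_MS)))
		return -ETIMEDOUT;

	return 0;
}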
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 2f4fb09f1e89..e0de44000d92 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -161,8 +161,10 @@ struct mlx5_vdpa_net {
struct mlx5_flow_handle *rx_rule_mcast;
bool setup;
u32 cur_num_vqs;
+ u32 rqt_size;
struct notifier_block nb;
struct vdpa_callback config_cb;
+ struct mlx5_vdpa_wq_ent cvq_ent;
};
static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -203,17 +205,12 @@ static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
}
-static inline u32 mlx5_vdpa_max_qps(int max_vqs)
-{
- return max_vqs / 2;
-}
-
static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev)
{
if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
return 2;
- return 2 * mlx5_vdpa_max_qps(mvdev->max_vqs);
+ return mvdev->max_vqs;
}
static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx)
@@ -1235,25 +1232,13 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
static int create_rqt(struct mlx5_vdpa_net *ndev)
{
__be32 *list;
- int max_rqt;
void *rqtc;
int inlen;
void *in;
int i, j;
int err;
- int num;
-
- if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
- num = 1;
- else
- num = ndev->cur_num_vqs / 2;
-
- max_rqt = min_t(int, roundup_pow_of_two(num),
- 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
- if (max_rqt < 1)
- return -EOPNOTSUPP;
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1262,12 +1247,12 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
- MLX5_SET(rqtc, rqtc, rqt_max_size, max_rqt);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, ndev->rqt_size);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; i < max_rqt; i++, j += 2)
- list[i] = cpu_to_be32(ndev->vqs[j % (2 * num)].virtq_id);
+ for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+ list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
kfree(in);
if (err)
@@ -1281,19 +1266,13 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
{
__be32 *list;
- int max_rqt;
void *rqtc;
int inlen;
void *in;
int i, j;
int err;
- max_rqt = min_t(int, roundup_pow_of_two(ndev->cur_num_vqs / 2),
- 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
- if (max_rqt < 1)
- return -EOPNOTSUPP;
-
- inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1304,10 +1283,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; i < max_rqt; i++, j += 2)
+ for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
kfree(in);
if (err)
@@ -1624,7 +1603,7 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
if (newqps < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
- newqps > mlx5_vdpa_max_qps(mvdev->max_vqs))
+ newqps > ndev->rqt_size)
break;
if (ndev->cur_num_vqs == 2 * newqps) {
@@ -1658,6 +1637,12 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
mvdev = wqent->mvdev;
ndev = to_mlx5_vdpa_ndev(mvdev);
cvq = &mvdev->cvq;
+
+ mutex_lock(&ndev->reslock);
+
+ if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+ goto out;
+
if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
goto out;
@@ -1696,9 +1681,13 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
if (vringh_need_notify_iotlb(&cvq->vring))
vringh_notify(&cvq->vring);
+
+ queue_work(mvdev->wq, &wqent->work);
+ break;
}
+
out:
- kfree(wqent);
+ mutex_unlock(&ndev->reslock);
}
static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
@@ -1706,7 +1695,6 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
- struct mlx5_vdpa_wq_ent *wqent;
if (!is_index_valid(mvdev, idx))
return;
@@ -1715,13 +1703,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
if (!mvdev->wq || !mvdev->cvq.ready)
return;
- wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
- if (!wqent)
- return;
-
- wqent->mvdev = mvdev;
- INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
- queue_work(mvdev->wq, &wqent->work);
+ queue_work(mvdev->wq, &ndev->cvq_ent.work);
return;
}
@@ -1985,7 +1967,7 @@ static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
int err;
int i;
- for (i = 0; i < 2 * mlx5_vdpa_max_qps(mvdev->max_vqs); i++) {
+ for (i = 0; i < mvdev->max_vqs; i++) {
err = setup_vq(ndev, &ndev->vqs[i]);
if (err)
goto err_vq;
@@ -2056,9 +2038,11 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ))
- ndev->cur_num_vqs = 2 * mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
+ ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
else
- ndev->cur_num_vqs = 2;
+ ndev->rqt_size = 1;
+
+ ndev->cur_num_vqs = 2 * ndev->rqt_size;
update_cvq_info(mvdev);
return err;
@@ -2180,7 +2164,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
goto err_mr;
if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
- return 0;
+ goto err_mr;
restore_channels_info(ndev);
err = setup_driver(mvdev);
@@ -2195,12 +2179,14 @@ err_mr:
return err;
}
+/* reslock must be held for this function */
static int setup_driver(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
- mutex_lock(&ndev->reslock);
+ WARN_ON(!mutex_is_locked(&ndev->reslock));
+
if (ndev->setup) {
mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
err = 0;
@@ -2230,7 +2216,6 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
goto err_fwd;
}
ndev->setup = true;
- mutex_unlock(&ndev->reslock);
return 0;
@@ -2241,23 +2226,23 @@ err_tir:
err_rqt:
teardown_virtqueues(ndev);
out:
- mutex_unlock(&ndev->reslock);
return err;
}
+/* reslock must be held for this function */
static void teardown_driver(struct mlx5_vdpa_net *ndev)
{
- mutex_lock(&ndev->reslock);
+
+ WARN_ON(!mutex_is_locked(&ndev->reslock));
+
if (!ndev->setup)
- goto out;
+ return;
remove_fwd_to_tir(ndev);
destroy_tir(ndev);
destroy_rqt(ndev);
teardown_virtqueues(ndev);
ndev->setup = false;
-out:
- mutex_unlock(&ndev->reslock);
}
static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
@@ -2278,6 +2263,8 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
print_status(mvdev, status, true);
+ mutex_lock(&ndev->reslock);
+
if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
err = setup_driver(mvdev);
@@ -2287,16 +2274,19 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
}
} else {
mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
- return;
+ goto err_clear;
}
}
ndev->mvdev.status = status;
+ mutex_unlock(&ndev->reslock);
return;
err_setup:
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
+err_clear:
+ mutex_unlock(&ndev->reslock);
}
static int mlx5_vdpa_reset(struct vdpa_device *vdev)
@@ -2306,6 +2296,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
print_status(mvdev, 0, true);
mlx5_vdpa_info(mvdev, "performing device reset\n");
+
+ mutex_lock(&ndev->reslock);
teardown_driver(ndev);
clear_vqs_ready(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
@@ -2318,6 +2310,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
if (mlx5_vdpa_create_mr(mvdev, NULL))
mlx5_vdpa_warn(mvdev, "create MR failed\n");
}
+ mutex_unlock(&ndev->reslock);
return 0;
}
@@ -2353,19 +2346,24 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
bool change_map;
int err;
+ mutex_lock(&ndev->reslock);
+
err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
if (err) {
mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
- return err;
+ goto err;
}
if (change_map)
- return mlx5_vdpa_change_map(mvdev, iotlb);
+ err = mlx5_vdpa_change_map(mvdev, iotlb);
- return 0;
+err:
+ mutex_unlock(&ndev->reslock);
+ return err;
}
static void mlx5_vdpa_free(struct vdpa_device *vdev)
@@ -2511,7 +2509,7 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev)
struct mlx5_vdpa_virtqueue *mvq;
int i;
- for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) {
+ for (i = 0; i < ndev->mvdev.max_vqs; ++i) {
mvq = &ndev->vqs[i];
memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
mvq->index = i;
@@ -2653,7 +2651,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
return -EOPNOTSUPP;
}
- max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
+ max_vqs = min_t(int, MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues),
+ 1 << MLX5_CAP_GEN(mdev, log_max_rqt_size));
if (max_vqs < 2) {
dev_warn(mdev->device,
"%d virtqueues are supported. At least 2 are required\n",
@@ -2724,7 +2723,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
}
- config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, mlx5_vdpa_max_qps(max_vqs));
+ config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2);
mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
@@ -2740,6 +2739,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
if (err)
goto err_mr;
+ ndev->cvq_ent.mvdev = mvdev;
+ INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
if (!mvdev->wq) {
err = -ENOMEM;
@@ -2749,7 +2750,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->nb.notifier_call = event_handler;
mlx5_notifier_register(mdev, &ndev->nb);
mvdev->vdev.mdev = &mgtdev->mgtdev;
- err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs) + 1);
+ err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1);
if (err)
goto err_reg;
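One thread running through this mlx5_vnet rework: the virtqueue kick path must neither sleep nor fail, so the per-kick GFP_ATOMIC allocation of a work entry (whose failure silently dropped a CVQ kick) is replaced by a single entry embedded in the device and initialized once in dev_add. A sketch of the embed-once pattern with hypothetical type names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev;

struct cvq_ent {
	struct my_dev *mvdev;
	struct work_struct work;
};

struct my_dev {
	struct workqueue_struct *wq;
	struct cvq_ent cvq_ent;	/* preallocated with the device */
};

static void cvq_kick_handler(struct work_struct *work)
{
	struct cvq_ent *ent = container_of(work, struct cvq_ent, work);

	/* ... drain the control virtqueue of ent->mvdev ... */
}

/* Initialize once at setup time... */
static void my_dev_init(struct my_dev *d)
{
	d->cvq_ent.mvdev = d;
	INIT_WORK(&d->cvq_ent.work, cvq_kick_handler);
}

/* ...so the hot kick path never allocates and cannot fail. */
static void my_dev_kick(struct my_dev *d)
{
	queue_work(d->wq, &d->cvq_ent.work);
}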
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index b7bb16f92ac6..06b6f3594a13 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -36,6 +36,10 @@ static bool nointxmask;
static bool disable_vga;
static bool disable_idle_d3;
+/* List of PFs that vfio_pci_core_sriov_configure() has been called on */
+static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
+static LIST_HEAD(vfio_pci_sriov_pfs);
+
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
@@ -434,47 +438,17 @@ out:
}
EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
-static struct vfio_pci_core_device *get_pf_vdev(struct vfio_pci_core_device *vdev)
-{
- struct pci_dev *physfn = pci_physfn(vdev->pdev);
- struct vfio_device *pf_dev;
-
- if (!vdev->pdev->is_virtfn)
- return NULL;
-
- pf_dev = vfio_device_get_from_dev(&physfn->dev);
- if (!pf_dev)
- return NULL;
-
- if (pci_dev_driver(physfn) != pci_dev_driver(vdev->pdev)) {
- vfio_device_put(pf_dev);
- return NULL;
- }
-
- return container_of(pf_dev, struct vfio_pci_core_device, vdev);
-}
-
-static void vfio_pci_vf_token_user_add(struct vfio_pci_core_device *vdev, int val)
-{
- struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
-
- if (!pf_vdev)
- return;
-
- mutex_lock(&pf_vdev->vf_token->lock);
- pf_vdev->vf_token->users += val;
- WARN_ON(pf_vdev->vf_token->users < 0);
- mutex_unlock(&pf_vdev->vf_token->lock);
-
- vfio_device_put(&pf_vdev->vdev);
-}
-
void vfio_pci_core_close_device(struct vfio_device *core_vdev)
{
struct vfio_pci_core_device *vdev =
container_of(core_vdev, struct vfio_pci_core_device, vdev);
- vfio_pci_vf_token_user_add(vdev, -1);
+ if (vdev->sriov_pf_core_dev) {
+ mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users);
+ vdev->sriov_pf_core_dev->vf_token->users--;
+ mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ }
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_core_disable(vdev);
@@ -495,7 +469,12 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
{
vfio_pci_probe_mmaps(vdev);
vfio_spapr_pci_eeh_open(vdev->pdev);
- vfio_pci_vf_token_user_add(vdev, 1);
+
+ if (vdev->sriov_pf_core_dev) {
+ mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ vdev->sriov_pf_core_dev->vf_token->users++;
+ mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ }
}
EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
@@ -1583,11 +1562,8 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
*
* If the VF token is provided but unused, an error is generated.
*/
- if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
- return 0; /* No VF token provided or required */
-
if (vdev->pdev->is_virtfn) {
- struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
+ struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev;
bool match;
if (!pf_vdev) {
@@ -1600,7 +1576,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
}
if (!vf_token) {
- vfio_device_put(&pf_vdev->vdev);
pci_info_ratelimited(vdev->pdev,
"VF token required to access device\n");
return -EACCES;
@@ -1610,8 +1585,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
mutex_unlock(&pf_vdev->vf_token->lock);
- vfio_device_put(&pf_vdev->vdev);
-
if (!match) {
pci_info_ratelimited(vdev->pdev,
"Incorrect VF token provided for device\n");
@@ -1732,8 +1705,30 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
+ struct vfio_pci_core_device *cur;
+ struct pci_dev *physfn;
int ret;
+ if (pdev->is_virtfn) {
+ /*
+ * If this VF was created by our vfio_pci_core_sriov_configure()
+ * then we can find the PF vfio_pci_core_device now, and due to
+ * the locking in pci_disable_sriov() it cannot change until
+ * this VF device driver is removed.
+ */
+ physfn = pci_physfn(vdev->pdev);
+ mutex_lock(&vfio_pci_sriov_pfs_mutex);
+ list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) {
+ if (cur->pdev == physfn) {
+ vdev->sriov_pf_core_dev = cur;
+ break;
+ }
+ }
+ mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+ return 0;
+ }
+
+ /* Not a SRIOV PF */
if (!pdev->is_physfn)
return 0;
@@ -1805,6 +1800,7 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
INIT_LIST_HEAD(&vdev->ioeventfds_list);
mutex_init(&vdev->vma_lock);
INIT_LIST_HEAD(&vdev->vma_list);
+ INIT_LIST_HEAD(&vdev->sriov_pfs_item);
init_rwsem(&vdev->memory_lock);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_init_device);
@@ -1896,7 +1892,7 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
- pci_disable_sriov(pdev);
+ vfio_pci_core_sriov_configure(pdev, 0);
vfio_unregister_group_dev(&vdev->vdev);
@@ -1935,21 +1931,49 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
{
+ struct vfio_pci_core_device *vdev;
struct vfio_device *device;
int ret = 0;
+ device_lock_assert(&pdev->dev);
+
device = vfio_device_get_from_dev(&pdev->dev);
if (!device)
return -ENODEV;
- if (nr_virtfn == 0)
- pci_disable_sriov(pdev);
- else
+ vdev = container_of(device, struct vfio_pci_core_device, vdev);
+
+ if (nr_virtfn) {
+ mutex_lock(&vfio_pci_sriov_pfs_mutex);
+ /*
+ * The thread that adds the vdev to the list is the only thread
+ * that gets to call pci_enable_sriov() and we will only allow
+ * it to be called once without going through
+ * pci_disable_sriov()
+ */
+ if (!list_empty(&vdev->sriov_pfs_item)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
+ mutex_unlock(&vfio_pci_sriov_pfs_mutex);
ret = pci_enable_sriov(pdev, nr_virtfn);
+ if (ret)
+ goto out_del;
+ ret = nr_virtfn;
+ goto out_put;
+ }
- vfio_device_put(device);
+ pci_disable_sriov(pdev);
- return ret < 0 ? ret : nr_virtfn;
+out_del:
+ mutex_lock(&vfio_pci_sriov_pfs_mutex);
+ list_del_init(&vdev->sriov_pfs_item);
+out_unlock:
+ mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+out_put:
+ vfio_device_put(device);
+ return ret;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
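The vfio rework replaces per-call vfio_device lookups with a driver-global list of PFs: a PF is published exactly once before pci_enable_sriov(), and a VF probe finds its PF under the same mutex. A generic sketch of the publish/lookup pattern (names are placeholders; entries must start INIT_LIST_HEAD()-initialized, as the hunk does in init_device):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(pfs_mutex);
static LIST_HEAD(pfs);

struct pf_dev {
	struct list_head item;	/* on 'pfs' while SR-IOV is enabled */
	void *key;		/* e.g. the underlying pci_dev */
};

/* Publish the PF exactly once, before enabling SR-IOV. */
static int pf_publish(struct pf_dev *pf)
{
	int ret = 0;

	mutex_lock(&pfs_mutex);
	if (!list_empty(&pf->item))
		ret = -EINVAL;	/* already published */
	else
		list_add_tail(&pf->item, &pfs);
	mutex_unlock(&pfs_mutex);
	return ret;
}

static void pf_unpublish(struct pf_dev *pf)
{
	mutex_lock(&pfs_mutex);
	list_del_init(&pf->item);	/* may be published again later */
	mutex_unlock(&pfs_mutex);
}

/* VFs look their PF up by key under the same mutex. */
static struct pf_dev *pf_lookup(void *key)
{
	struct pf_dev *cur, *found = NULL;

	mutex_lock(&pfs_mutex);
	list_for_each_entry(cur, &pfs, item) {
		if (cur->key == key) {
			found = cur;
			break;
		}
	}
	mutex_unlock(&pfs_mutex);
	return found;
}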
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 792ab5f23647..297b5db47454 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1450,13 +1450,9 @@ err:
return ERR_PTR(r);
}
-static struct ptr_ring *get_tap_ptr_ring(int fd)
+static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{
struct ptr_ring *ring;
- struct file *file = fget(fd);
-
- if (!file)
- return NULL;
ring = tun_get_tx_ring(file);
if (!IS_ERR(ring))
goto out;
@@ -1465,7 +1461,6 @@ static struct ptr_ring *get_tap_ptr_ring(int fd)
goto out;
ring = NULL;
out:
- fput(file);
return ring;
}
@@ -1552,8 +1547,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
r = vhost_net_enable_vq(n, vq);
if (r)
goto err_used;
- if (index == VHOST_NET_VQ_RX)
- nvq->rx_ring = get_tap_ptr_ring(fd);
+ if (index == VHOST_NET_VQ_RX) {
+ if (sock)
+ nvq->rx_ring = get_tap_ptr_ring(sock->file);
+ else
+ nvq->rx_ring = NULL;
+ }
oldubufs = nvq->ubufs;
nvq->ubufs = ubufs;
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index edf169d0816e..eb3e47c58c5f 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -566,6 +566,9 @@ static int arkfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
int rv, mem, step;
+ if (!var->pixclock)
+ return -EINVAL;
+
/* Find appropriate format */
rv = svga_match_format (arkfb_formats, var, NULL);
if (rv < 0)
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index 6ff16d3132e5..b26c81233b6b 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -68,7 +68,6 @@
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-#include <asm/prom.h>
#include "../macmodes.h"
#endif
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 1aef3d6ebd88..a3e6faed7745 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -79,7 +79,6 @@
#ifdef __powerpc__
#include <asm/machdep.h>
-#include <asm/prom.h>
#include "../macmodes.h"
#endif
#ifdef __sparc__
diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c
index b5fbd5329652..97a5972f5b1f 100644
--- a/drivers/video/fbdev/aty/radeon_pm.c
+++ b/drivers/video/fbdev/aty/radeon_pm.c
@@ -22,7 +22,6 @@
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/pmac_feature.h>
#endif
diff --git a/drivers/video/fbdev/aty/radeonfb.h b/drivers/video/fbdev/aty/radeonfb.h
index 93f403cbb415..91d81b576231 100644
--- a/drivers/video/fbdev/aty/radeonfb.h
+++ b/drivers/video/fbdev/aty/radeonfb.h
@@ -21,7 +21,7 @@
#include <asm/io.h>
-#if defined(CONFIG_PPC) || defined(CONFIG_SPARC)
+#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index c5d15c6db287..771ce1f76951 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -268,8 +268,7 @@ static int clps711x_fb_probe(struct platform_device *pdev)
goto out_fb_release;
}
- cfb->syscon =
- syscon_regmap_lookup_by_compatible("cirrus,ep7209-syscon1");
+ cfb->syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR(cfb->syscon)) {
ret = PTR_ERR(cfb->syscon);
goto out_fb_release;
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index bd59e7b11ed5..aba46118b208 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -47,9 +47,6 @@
#include <linux/nvram.h>
#include <linux/adb.h>
#include <linux/cuda.h>
-#ifdef CONFIG_PPC_PMAC
-#include <asm/prom.h>
-#endif
#ifdef CONFIG_BOOTX_TEXT
#include <asm/btext.h>
#endif
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 34d6bb1bf82e..a6bb0e438216 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1579,7 +1579,14 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
* If it's not a platform device, at least print a warning. A
* fix would add code to remove the device from the system.
*/
- if (dev_is_platform(device)) {
+ if (!device) {
+ /* TODO: Represent each OF framebuffer as its own
+ * device in the device hierarchy. For now, offb
+ * doesn't have such a device, so unregister the
+ * framebuffer as before without warning.
+ */
+ do_unregister_framebuffer(registered_fb[i]);
+ } else if (dev_is_platform(device)) {
registered_fb[i]->forced_out = true;
platform_device_unregister(to_platform_device(device));
} else {
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 26892940c213..82e31a2d845e 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -80,6 +80,10 @@ void framebuffer_release(struct fb_info *info)
{
if (!info)
return;
+
+ if (WARN_ON(refcount_read(&info->count)))
+ return;
+
kfree(info->apertures);
kfree(info);
}
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index ea42ba6445b2..b3d5f884c544 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -243,6 +243,10 @@ error:
static inline void efifb_show_boot_graphics(struct fb_info *info) {}
#endif
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+ * of unregister_framebuffer() or fb_release(). Do any cleanup here.
+ */
static void efifb_destroy(struct fb_info *info)
{
if (efifb_pci_dev)
@@ -254,10 +258,13 @@ static void efifb_destroy(struct fb_info *info)
else
memunmap(info->screen_base);
}
+
if (request_mem_succeeded)
release_mem_region(info->apertures->ranges[0].base,
info->apertures->ranges[0].size);
fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
}
static const struct fb_ops efifb_ops = {
@@ -620,9 +627,9 @@ static int efifb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
+ /* efifb_destroy takes care of info cleanup */
unregister_framebuffer(info);
sysfs_remove_groups(&pdev->dev.kobj, efifb_groups);
- framebuffer_release(info);
return 0;
}
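Moving framebuffer_release() into fb_ops.fb_destroy ties the final free to the last put_fb_info(), so an open /dev/fbN file can no longer outlive the fb_info that remove() used to free directly. A minimal sketch of the split, assuming a simple platform driver (names hypothetical):

#include <linux/fb.h>
#include <linux/platform_device.h>

/* Called by the core when the last reference is dropped. */
static void myfb_destroy(struct fb_info *info)
{
	/* unmap/release device resources here, then free 'info' last */
	framebuffer_release(info);
}

static const struct fb_ops myfb_ops = {
	.fb_destroy	= myfb_destroy,
	/* ... the usual fb_read/fb_write/etc. callbacks ... */
};

static int myfb_remove(struct platform_device *pdev)
{
	struct fb_info *info = platform_get_drvdata(pdev);

	/* .fb_destroy runs on the last put, possibly much later */
	unregister_framebuffer(info);
	return 0;
}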
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 52cce0db8bd3..09dd85553d4f 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -657,6 +657,9 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
static int i740fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
+ if (!var->pixclock)
+ return -EINVAL;
+
switch (var->bits_per_pixel) {
case 8:
var->red.offset = var->green.offset = var->blue.offset = 0;
@@ -740,7 +743,7 @@ static int i740fb_set_par(struct fb_info *info)
if (i)
return i;
- memset(info->screen_base, 0, info->screen_size);
+ memset_io(info->screen_base, 0, info->screen_size);
vga_protect(par);
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index 68288756ffff..a2f644c97f28 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -925,10 +925,12 @@ static int imxfb_probe(struct platform_device *pdev)
sizeof(struct imx_fb_videomode), GFP_KERNEL);
if (!fbi->mode) {
ret = -ENOMEM;
+ of_node_put(display_np);
goto failed_of_parse;
}
ret = imxfb_of_read_mode(&pdev->dev, display_np, fbi->mode);
+ of_node_put(display_np);
if (ret)
goto failed_of_parse;
}
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index 25801e8e3f74..d57772f96ad2 100644
--- a/drivers/video/fbdev/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
@@ -494,6 +494,8 @@ static int kyrofb_set_par(struct fb_info *info)
info->var.hsync_len +
info->var.left_margin)) / 1000;
+ if (!lineclock)
+ return -EINVAL;
/* time for a frame in ns (precision in 32bpp) */
frameclock = lineclock * (info->var.yres +
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.h b/drivers/video/fbdev/matrox/matroxfb_base.h
index 759dee996af1..958be6805f87 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.h
+++ b/drivers/video/fbdev/matrox/matroxfb_base.h
@@ -47,7 +47,6 @@
#include <asm/unaligned.h>
#if defined(CONFIG_PPC_PMAC)
-#include <asm/prom.h>
#include "../macmodes.h"
#endif
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 63721337a377..a7508f5be343 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -18,6 +18,8 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#if defined(CONFIG_OF)
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#endif
#include "mb862xxfb.h"
diff --git a/drivers/video/fbdev/mmp/core.c b/drivers/video/fbdev/mmp/core.c
index 154127256a2c..03707461eced 100644
--- a/drivers/video/fbdev/mmp/core.c
+++ b/drivers/video/fbdev/mmp/core.c
@@ -127,19 +127,18 @@ EXPORT_SYMBOL_GPL(mmp_unregister_panel);
*/
struct mmp_path *mmp_get_path(const char *name)
{
- struct mmp_path *path;
- int found = 0;
+ struct mmp_path *path = NULL, *iter;
mutex_lock(&disp_lock);
- list_for_each_entry(path, &path_list, node) {
- if (!strcmp(name, path->name)) {
- found = 1;
+ list_for_each_entry(iter, &path_list, node) {
+ if (!strcmp(name, iter->name)) {
+ path = iter;
break;
}
}
mutex_unlock(&disp_lock);
- return found ? path : NULL;
+ return path;
}
EXPORT_SYMBOL_GPL(mmp_get_path);
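The mmp_get_path() rewrite is the canonical fix for a long-standing list_for_each_entry() pitfall: when the loop terminates without hitting 'break', the iterator does not point at a real element but at a bogus entry computed from the list head, so it must never be returned or dereferenced. Searching with a separate iterator makes the result provably NULL-or-valid; a sketch with illustrative types:

#include <linux/list.h>
#include <linux/string.h>

struct path_like {
	struct list_head node;
	const char *name;
};

static struct path_like *find_path(struct list_head *head,
				   const char *name)
{
	struct path_like *path = NULL, *iter;

	list_for_each_entry(iter, head, node) {
		if (!strcmp(name, iter->name)) {
			path = iter;	/* only set on a real match */
			break;
		}
	}
	return path;	/* NULL when nothing matched */
}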
diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
index 966df2a07360..28d32cbf496b 100644
--- a/drivers/video/fbdev/neofb.c
+++ b/drivers/video/fbdev/neofb.c
@@ -585,7 +585,7 @@ neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
DBG("neofb_check_var");
- if (var->pixclock && PICOS2KHZ(var->pixclock) > par->maxClock)
+ if (!var->pixclock || PICOS2KHZ(var->pixclock) > par->maxClock)
return -EINVAL;
/* Is the mode larger than the LCD panel? */
diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
index b191bef22d98..9d9fe5c3a7a1 100644
--- a/drivers/video/fbdev/omap/hwa742.c
+++ b/drivers/video/fbdev/omap/hwa742.c
@@ -964,7 +964,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0)
goto err3;
hwa742.extif->set_timings(&hwa742.reg_timings);
- clk_enable(hwa742.sys_ck);
+ clk_prepare_enable(hwa742.sys_ck);
calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk);
if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0)
@@ -1023,7 +1023,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
return 0;
err4:
- clk_disable(hwa742.sys_ck);
+ clk_disable_unprepare(hwa742.sys_ck);
err3:
hwa742.extif->cleanup();
err2:
@@ -1037,7 +1037,7 @@ static void hwa742_cleanup(void)
hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
hwa742.extif->cleanup();
hwa742.int_ctrl->cleanup();
- clk_disable(hwa742.sys_ck);
+ clk_disable_unprepare(hwa742.sys_ck);
}
struct lcd_ctrl hwa742_ctrl = {
diff --git a/drivers/video/fbdev/omap/lcdc.c b/drivers/video/fbdev/omap/lcdc.c
index 7317c9aad677..97d20dc0d1d0 100644
--- a/drivers/video/fbdev/omap/lcdc.c
+++ b/drivers/video/fbdev/omap/lcdc.c
@@ -711,7 +711,7 @@ static int omap_lcdc_init(struct omapfb_device *fbdev, int ext_mode,
dev_err(fbdev->dev, "failed to adjust LCD rate\n");
goto fail1;
}
- clk_enable(lcdc.lcd_ck);
+ clk_prepare_enable(lcdc.lcd_ck);
r = request_irq(OMAP_LCDC_IRQ, lcdc_irq_handler, 0, MODULE_NAME, fbdev);
if (r) {
@@ -746,7 +746,7 @@ fail4:
fail3:
free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
fail2:
- clk_disable(lcdc.lcd_ck);
+ clk_disable_unprepare(lcdc.lcd_ck);
fail1:
clk_put(lcdc.lcd_ck);
fail0:
@@ -760,7 +760,7 @@ static void omap_lcdc_cleanup(void)
free_fbmem();
omap_free_lcd_dma();
free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
- clk_disable(lcdc.lcd_ck);
+ clk_disable_unprepare(lcdc.lcd_ck);
clk_put(lcdc.lcd_ck);
}
diff --git a/drivers/video/fbdev/omap/sossi.c b/drivers/video/fbdev/omap/sossi.c
index 80ac67f27f0d..b9cb8b386627 100644
--- a/drivers/video/fbdev/omap/sossi.c
+++ b/drivers/video/fbdev/omap/sossi.c
@@ -598,7 +598,7 @@ static int sossi_init(struct omapfb_device *fbdev)
l &= ~CONF_SOSSI_RESET_R;
omap_writel(l, MOD_CONF_CTRL_1);
- clk_enable(sossi.fck);
+ clk_prepare_enable(sossi.fck);
l = omap_readl(ARM_IDLECT2);
l &= ~(1 << 8); /* DMACK_REQ */
omap_writel(l, ARM_IDLECT2);
@@ -649,7 +649,7 @@ static int sossi_init(struct omapfb_device *fbdev)
return 0;
err:
- clk_disable(sossi.fck);
+ clk_disable_unprepare(sossi.fck);
clk_put(sossi.fck);
return r;
}
@@ -657,6 +657,7 @@ err:
static void sossi_cleanup(void)
{
omap_lcdc_free_dma_callback();
+ clk_unprepare(sossi.fck);
clk_put(sossi.fck);
iounmap(sossi.base);
}
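The omapfb hunks in this group are all the same conversion: the common clock framework requires a clock to be prepared before it is enabled, so bare clk_enable()/clk_disable() calls on a never-prepared clock trigger warnings and may leave the clock off. The calls must pair up exactly; a minimal sketch, assuming 'ck' was obtained with clk_get():

#include <linux/clk.h>

static int engine_power_on(struct clk *ck)
{
	/* prepare (may sleep) + enable in one call */
	return clk_prepare_enable(ck);
}

static void engine_power_off(struct clk *ck)
{
	/* must mirror clk_prepare_enable() exactly */
	clk_disable_unprepare(ck);
}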
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index ce413a9df06e..5b9e26ea6449 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -30,9 +30,9 @@
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/nvram.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-#include <asm/prom.h>
#include "macmodes.h"
#include "platinumfb.h"
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index c68725eebee3..d3be2c64f1c0 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -1504,9 +1504,7 @@ static const struct fb_ops pm2fb_ops = {
/**
- * Device initialisation
- *
- * Initialise and allocate resource for PCI device.
+ * pm2fb_probe - Initialise and allocate resource for PCI device.
*
* @pdev: PCI device.
* @id: PCI device ID.
@@ -1711,9 +1709,7 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/**
- * Device removal.
- *
- * Release all device resources.
+ * pm2fb_remove - Release all device resources.
*
* @pdev: PCI device to clean up.
*/
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index f1551e00eb12..8ad91c251fe6 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2256,10 +2256,10 @@ static int pxafb_probe(struct platform_device *dev)
goto failed;
for (i = 0; i < inf->num_modes; i++)
inf->modes[i] = pdata->modes[i];
+ } else {
+ inf = of_pxafb_of_mach_info(&dev->dev);
}
- if (!pdata)
- inf = of_pxafb_of_mach_info(&dev->dev);
if (IS_ERR_OR_NULL(inf))
goto failed;
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index 5c74253e7b2c..b93c8eb02336 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -549,6 +549,9 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
int rv, mem, step;
u16 m, n, r;
+ if (!var->pixclock)
+ return -EINVAL;
+
/* Find appropriate format */
rv = svga_match_format (s3fb_formats, var, NULL);
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index aa4ebe3192ec..9a4417430b4e 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -531,9 +531,6 @@ static void sh_mobile_lcdc_display_off(struct sh_mobile_lcdc_chan *ch)
ch->tx_dev->ops->display_off(ch->tx_dev);
}
-static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info);
-
/* -----------------------------------------------------------------------------
* Format helpers
*/
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 94fc9c6d0411..2c198561c338 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -84,6 +84,10 @@ struct simplefb_par {
static void simplefb_clocks_destroy(struct simplefb_par *par);
static void simplefb_regulators_destroy(struct simplefb_par *par);
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+ * of unregister_framebuffer() or fb_release(). Do any cleanup here.
+ */
static void simplefb_destroy(struct fb_info *info)
{
struct simplefb_par *par = info->par;
@@ -94,6 +98,8 @@ static void simplefb_destroy(struct fb_info *info)
if (info->screen_base)
iounmap(info->screen_base);
+ framebuffer_release(info);
+
if (mem)
release_mem_region(mem->start, resource_size(mem));
}
@@ -545,8 +551,8 @@ static int simplefb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
+ /* simplefb_destroy takes care of info cleanup */
unregister_framebuffer(info);
- framebuffer_release(info);
return 0;
}
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 742f62986b80..f28fd69d5eb7 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -4463,7 +4463,7 @@ static void sisfb_post_sis300(struct pci_dev *pdev)
SiS_SetReg(SISCR, 0x37, 0x02);
SiS_SetReg(SISPART2, 0x00, 0x1c);
v4 = 0x00; v5 = 0x00; v6 = 0x10;
- if(ivideo->SiS_Pr.UseROM) {
+ if (ivideo->SiS_Pr.UseROM && bios) {
v4 = bios[0xf5];
v5 = bios[0xf6];
v6 = bios[0xf7];
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 4d20cb557ff0..319131bd72cf 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -996,6 +996,9 @@ static int tridentfb_check_var(struct fb_var_screeninfo *var,
int ramdac = 230000; /* 230MHz for most 3D chips */
debug("enter\n");
+ if (!var->pixclock)
+ return -EINVAL;
+
/* check color depth */
if (bpp == 24)
bpp = var->bits_per_pixel = 32;
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index b6ec0b8e2b72..d280733f283b 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1650,8 +1650,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
const struct device_attribute *attr;
struct dlfb_data *dlfb;
struct fb_info *info;
- int retval = -ENOMEM;
+ int retval;
struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *out;
/* usb initialization */
dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
@@ -1665,6 +1666,12 @@ static int dlfb_usb_probe(struct usb_interface *intf,
dlfb->udev = usb_get_dev(usbdev);
usb_set_intfdata(intf, dlfb);
+ retval = usb_find_common_endpoints(intf->cur_altsetting, NULL, &out, NULL, NULL);
+ if (retval) {
+ dev_err(&intf->dev, "Device should have at lease 1 bulk endpoint!\n");
+ goto error;
+ }
+
dev_dbg(&intf->dev, "console enable=%d\n", console);
dev_dbg(&intf->dev, "fb_defio enable=%d\n", fb_defio);
dev_dbg(&intf->dev, "shadow enable=%d\n", shadow);
@@ -1674,6 +1681,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
if (!dlfb_parse_vendor_descriptor(dlfb, intf)) {
dev_err(&intf->dev,
"firmware not recognized, incompatible device?\n");
+ retval = -ENODEV;
goto error;
}
@@ -1687,8 +1695,10 @@ static int dlfb_usb_probe(struct usb_interface *intf,
/* allocates framebuffer driver structure, not framebuffer memory */
info = framebuffer_alloc(0, &dlfb->udev->dev);
- if (!info)
+ if (!info) {
+ retval = -ENOMEM;
goto error;
+ }
dlfb->info = info;
info->par = dlfb;
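Matching on VID/PID alone says nothing about the descriptor layout, and udlfb later assumes a bulk-out endpoint exists; a malicious or broken device without one could drive the driver into invalid accesses. Validating endpoints up front is the standard hardening; a sketch of just that check (function name is illustrative):

#include <linux/usb.h>

/* Reject interfaces that lack the bulk-out endpoint we rely on. */
static int validate_endpoints(struct usb_interface *intf)
{
	struct usb_endpoint_descriptor *bulk_out;
	int ret;

	ret = usb_find_common_endpoints(intf->cur_altsetting,
					NULL, &bulk_out, NULL, NULL);
	if (ret)
		dev_err(&intf->dev,
			"device lacks a bulk-out endpoint\n");
	return ret;
}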
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index 8425afe37d7c..a6c9d4f26669 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -54,10 +54,9 @@
#include <linux/nvram.h>
#include <linux/adb.h>
#include <linux/cuda.h>
+#include <linux/of_address.h>
#ifdef CONFIG_MAC
#include <asm/macintosh.h>
-#else
-#include <asm/prom.h>
#endif
#include "macmodes.h"
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index df6de5a9dd4c..e25e8de5ff67 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -179,6 +179,10 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
return err;
}
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+ * of unregister_framebuffer() or fb_release(). Do any cleanup here.
+ */
static void vesafb_destroy(struct fb_info *info)
{
struct vesafb_par *par = info->par;
@@ -188,6 +192,8 @@ static void vesafb_destroy(struct fb_info *info)
if (info->screen_base)
iounmap(info->screen_base);
release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
+
+ framebuffer_release(info);
}
static struct fb_ops vesafb_ops = {
@@ -484,10 +490,10 @@ static int vesafb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
+ /* vesafb_destroy takes care of info cleanup */
unregister_framebuffer(info);
if (((struct vesafb_par *)(info->par))->region)
release_region(0x3c0, 32);
- framebuffer_release(info);
return 0;
}
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index 7a959e5ba90b..a92a8c670cf0 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -321,6 +321,9 @@ static int vt8623fb_check_var(struct fb_var_screeninfo *var, struct fb_info *inf
{
int rv, mem, step;
+ if (!var->pixclock)
+ return -EINVAL;
+
/* Find appropriate format */
rv = svga_match_format (vt8623fb_formats, var, NULL);
if (rv < 0)
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index f93b6abbe258..bebd371c6b93 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -199,7 +199,7 @@ struct display_timings *of_get_display_timings(const struct device_node *np)
struct display_timing *dt;
int r;
- dt = kzalloc(sizeof(*dt), GFP_KERNEL);
+ dt = kmalloc(sizeof(*dt), GFP_KERNEL);
if (!dt) {
pr_err("%pOF: could not allocate display_timing struct\n",
np);
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index 0c1bba7c5c66..87ef258cec64 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -48,6 +48,8 @@ source "drivers/virt/nitro_enclaves/Kconfig"
source "drivers/virt/acrn/Kconfig"
+source "drivers/virt/coco/efi_secret/Kconfig"
+
source "drivers/virt/coco/sev-guest/Kconfig"
endif
diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile
index b2e6e864ebbe..093674e05c40 100644
--- a/drivers/virt/Makefile
+++ b/drivers/virt/Makefile
@@ -9,4 +9,5 @@ obj-y += vboxguest/
obj-$(CONFIG_NITRO_ENCLAVES) += nitro_enclaves/
obj-$(CONFIG_ACRN_HSM) += acrn/
+obj-$(CONFIG_EFI_SECRET) += coco/efi_secret/
obj-$(CONFIG_SEV_GUEST) += coco/sev-guest/
diff --git a/drivers/virt/coco/efi_secret/Kconfig b/drivers/virt/coco/efi_secret/Kconfig
new file mode 100644
index 000000000000..4404d198f3b2
--- /dev/null
+++ b/drivers/virt/coco/efi_secret/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config EFI_SECRET
+ tristate "EFI secret area securityfs support"
+ depends on EFI && X86_64
+ select EFI_COCO_SECRET
+ select SECURITYFS
+ help
+ This is a driver for accessing the EFI secret area via securityfs.
+ The EFI secret area is a memory area designated by the firmware for
+ confidential computing secret injection (for example for AMD SEV
+ guests). The driver exposes the secrets as files in
+ <securityfs>/secrets/coco. Files can be read and deleted (deleting
+ a file wipes the secret from memory).
+
+ To compile this driver as a module, choose M here.
+ The module will be called efi_secret.
diff --git a/drivers/virt/coco/efi_secret/Makefile b/drivers/virt/coco/efi_secret/Makefile
new file mode 100644
index 000000000000..c7047ce804f7
--- /dev/null
+++ b/drivers/virt/coco/efi_secret/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_EFI_SECRET) += efi_secret.o
diff --git a/drivers/virt/coco/efi_secret/efi_secret.c b/drivers/virt/coco/efi_secret/efi_secret.c
new file mode 100644
index 000000000000..e700a5ef7043
--- /dev/null
+++ b/drivers/virt/coco/efi_secret/efi_secret.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * efi_secret module
+ *
+ * Copyright (C) 2022 IBM Corporation
+ * Author: Dov Murik <dovmurik@linux.ibm.com>
+ */
+
+/**
+ * DOC: efi_secret: Allow reading EFI confidential computing (coco) secret area
+ * via securityfs interface.
+ *
+ * When the module is loaded (and securityfs is mounted, typically under
+ * /sys/kernel/security), a "secrets/coco" directory is created in securityfs.
+ * In it, a file is created for each secret entry. The name of each such file
+ * is the GUID of the secret entry, and its content is the secret data.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/security.h>
+#include <linux/efi.h>
+#include <linux/cacheflush.h>
+
+#define EFI_SECRET_NUM_FILES 64
+
+struct efi_secret {
+ struct dentry *secrets_dir;
+ struct dentry *fs_dir;
+ struct dentry *fs_files[EFI_SECRET_NUM_FILES];
+ void __iomem *secret_data;
+ u64 secret_data_len;
+};
+
+/*
+ * Structure of the EFI secret area
+ *
+ * Offset Length
+ * (bytes) (bytes) Usage
+ * ------- ------- -----
+ * 0 16 Secret table header GUID (must be 1e74f542-71dd-4d66-963e-ef4287ff173b)
+ * 16 4 Length of bytes of the entire secret area
+ *
+ * 20 16 First secret entry's GUID
+ * 36 4 First secret entry's length in bytes (= 16 + 4 + x)
+ * 40 x First secret entry's data
+ *
+ * 40+x 16 Second secret entry's GUID
+ * 56+x 4 Second secret entry's length in bytes (= 16 + 4 + y)
+ * 60+x y Second secret entry's data
+ *
+ * (... and so on for additional entries)
+ *
+ * The GUID of each secret entry designates the usage of the secret data.
+ */
+
+/**
+ * struct secret_header - Header of entire secret area; this should be followed
+ * by instances of struct secret_entry.
+ * @guid: Must be EFI_SECRET_TABLE_HEADER_GUID
+ * @len: Length in bytes of entire secret area, including header
+ */
+struct secret_header {
+ efi_guid_t guid;
+ u32 len;
+} __attribute((packed));
+
+/**
+ * struct secret_entry - Holds one secret entry
+ * @guid: Secret-specific GUID (or NULL_GUID if this secret entry was deleted)
+ * @len: Length of secret entry, including its guid and len fields
+ * @data: The secret data (full of zeros if this secret entry was deleted)
+ */
+struct secret_entry {
+ efi_guid_t guid;
+ u32 len;
+ u8 data[];
+} __attribute((packed));
+
+static size_t secret_entry_data_len(struct secret_entry *e)
+{
+ return e->len - sizeof(*e);
+}
+
+static struct efi_secret the_efi_secret;
+
+static inline struct efi_secret *efi_secret_get(void)
+{
+ return &the_efi_secret;
+}
+
+static int efi_secret_bin_file_show(struct seq_file *file, void *data)
+{
+ struct secret_entry *e = file->private;
+
+ if (e)
+ seq_write(file, e->data, secret_entry_data_len(e));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(efi_secret_bin_file);
+
+/*
+ * Overwrite memory content with zeroes, and ensure that dirty cache lines are
+ * actually written back to memory, to clear out the secret.
+ */
+static void wipe_memory(void *addr, size_t size)
+{
+ memzero_explicit(addr, size);
+#ifdef CONFIG_X86
+ clflush_cache_range(addr, size);
+#endif
+}
+
+static int efi_secret_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct efi_secret *s = efi_secret_get();
+ struct inode *inode = d_inode(dentry);
+ struct secret_entry *e = (struct secret_entry *)inode->i_private;
+ int i;
+
+ if (e) {
+ /* Zero out the secret data */
+ wipe_memory(e->data, secret_entry_data_len(e));
+ e->guid = NULL_GUID;
+ }
+
+ inode->i_private = NULL;
+
+ for (i = 0; i < EFI_SECRET_NUM_FILES; i++)
+ if (s->fs_files[i] == dentry)
+ s->fs_files[i] = NULL;
+
+ /*
+ * securityfs_remove tries to lock the directory's inode, but we reach
+ * the unlink callback when it's already locked
+ */
+ inode_unlock(dir);
+ securityfs_remove(dentry);
+ inode_lock(dir);
+
+ return 0;
+}
+
+static const struct inode_operations efi_secret_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .unlink = efi_secret_unlink,
+};
+
+static int efi_secret_map_area(struct platform_device *dev)
+{
+ int ret;
+ struct efi_secret *s = efi_secret_get();
+ struct linux_efi_coco_secret_area *secret_area;
+
+ if (efi.coco_secret == EFI_INVALID_TABLE_ADDR) {
+ dev_err(&dev->dev, "Secret area address is not available\n");
+ return -EINVAL;
+ }
+
+ secret_area = memremap(efi.coco_secret, sizeof(*secret_area), MEMREMAP_WB);
+ if (secret_area == NULL) {
+ dev_err(&dev->dev, "Could not map secret area EFI config entry\n");
+ return -ENOMEM;
+ }
+ if (!secret_area->base_pa || secret_area->size < sizeof(struct secret_header)) {
+ dev_err(&dev->dev,
+ "Invalid secret area memory location (base_pa=0x%llx size=0x%llx)\n",
+ secret_area->base_pa, secret_area->size);
+ ret = -EINVAL;
+ goto unmap;
+ }
+
+ s->secret_data = ioremap_encrypted(secret_area->base_pa, secret_area->size);
+ if (s->secret_data == NULL) {
+ dev_err(&dev->dev, "Could not map secret area\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ s->secret_data_len = secret_area->size;
+ ret = 0;
+
+unmap:
+ memunmap(secret_area);
+ return ret;
+}
+
+static void efi_secret_securityfs_teardown(struct platform_device *dev)
+{
+ struct efi_secret *s = efi_secret_get();
+ int i;
+
+ for (i = (EFI_SECRET_NUM_FILES - 1); i >= 0; i--) {
+ securityfs_remove(s->fs_files[i]);
+ s->fs_files[i] = NULL;
+ }
+
+ securityfs_remove(s->fs_dir);
+ s->fs_dir = NULL;
+
+ securityfs_remove(s->secrets_dir);
+ s->secrets_dir = NULL;
+
+ dev_dbg(&dev->dev, "Removed securityfs entries\n");
+}
+
+static int efi_secret_securityfs_setup(struct platform_device *dev)
+{
+ struct efi_secret *s = efi_secret_get();
+ int ret = 0, i = 0, bytes_left;
+ unsigned char *ptr;
+ struct secret_header *h;
+ struct secret_entry *e;
+ struct dentry *dent;
+ char guid_str[EFI_VARIABLE_GUID_LEN + 1];
+
+ ptr = (void __force *)s->secret_data;
+ h = (struct secret_header *)ptr;
+ if (efi_guidcmp(h->guid, EFI_SECRET_TABLE_HEADER_GUID)) {
+ /*
+ * This is not an error: it just means that the EFI secret
+ * area is defined but was not populated by the Guest Owner.
+ */
+ dev_dbg(&dev->dev, "EFI secret area does not start with correct GUID\n");
+ return -ENODEV;
+ }
+ if (h->len < sizeof(*h)) {
+ dev_err(&dev->dev, "EFI secret area reported length is too small\n");
+ return -EINVAL;
+ }
+ if (h->len > s->secret_data_len) {
+ dev_err(&dev->dev, "EFI secret area reported length is too big\n");
+ return -EINVAL;
+ }
+
+ s->secrets_dir = NULL;
+ s->fs_dir = NULL;
+ memset(s->fs_files, 0, sizeof(s->fs_files));
+
+ dent = securityfs_create_dir("secrets", NULL);
+ if (IS_ERR(dent)) {
+ dev_err(&dev->dev, "Error creating secrets securityfs directory entry err=%ld\n",
+ PTR_ERR(dent));
+ return PTR_ERR(dent);
+ }
+ s->secrets_dir = dent;
+
+ dent = securityfs_create_dir("coco", s->secrets_dir);
+ if (IS_ERR(dent)) {
+ dev_err(&dev->dev, "Error creating coco securityfs directory entry err=%ld\n",
+ PTR_ERR(dent));
+ return PTR_ERR(dent);
+ }
+ d_inode(dent)->i_op = &efi_secret_dir_inode_operations;
+ s->fs_dir = dent;
+
+ bytes_left = h->len - sizeof(*h);
+ ptr += sizeof(*h);
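+ /* Walk the secret entries; each is a guid+len header followed by its data */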
+ while (bytes_left >= (int)sizeof(*e) && i < EFI_SECRET_NUM_FILES) {
+ e = (struct secret_entry *)ptr;
+ if (e->len < sizeof(*e) || e->len > (unsigned int)bytes_left) {
+ dev_err(&dev->dev, "EFI secret area is corrupted\n");
+ ret = -EINVAL;
+ goto err_cleanup;
+ }
+
+ /* Skip deleted entries (which will have NULL_GUID) */
+ if (efi_guidcmp(e->guid, NULL_GUID)) {
+ efi_guid_to_str(&e->guid, guid_str);
+
+ dent = securityfs_create_file(guid_str, 0440, s->fs_dir, (void *)e,
+ &efi_secret_bin_file_fops);
+ if (IS_ERR(dent)) {
+ dev_err(&dev->dev, "Error creating efi_secret securityfs entry\n");
+ ret = PTR_ERR(dent);
+ goto err_cleanup;
+ }
+
+ s->fs_files[i++] = dent;
+ }
+ ptr += e->len;
+ bytes_left -= e->len;
+ }
+
+ dev_info(&dev->dev, "Created %d entries in securityfs secrets/coco\n", i);
+ return 0;
+
+err_cleanup:
+ efi_secret_securityfs_teardown(dev);
+ return ret;
+}
+
+static void efi_secret_unmap_area(void)
+{
+ struct efi_secret *s = efi_secret_get();
+
+ if (s->secret_data) {
+ iounmap(s->secret_data);
+ s->secret_data = NULL;
+ s->secret_data_len = 0;
+ }
+}
+
+static int efi_secret_probe(struct platform_device *dev)
+{
+ int ret;
+
+ ret = efi_secret_map_area(dev);
+ if (ret)
+ return ret;
+
+ ret = efi_secret_securityfs_setup(dev);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ efi_secret_unmap_area();
+ return ret;
+}
+
+static int efi_secret_remove(struct platform_device *dev)
+{
+ efi_secret_securityfs_teardown(dev);
+ efi_secret_unmap_area();
+ return 0;
+}
+
+static struct platform_driver efi_secret_driver = {
+ .probe = efi_secret_probe,
+ .remove = efi_secret_remove,
+ .driver = {
+ .name = "efi_secret",
+ },
+};
+
+module_platform_driver(efi_secret_driver);
+
+MODULE_DESCRIPTION("Confidential computing EFI secret area access");
+MODULE_AUTHOR("IBM");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:efi_secret");
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 75c8d560bbd3..22f15f444f75 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -526,9 +526,8 @@ int virtio_device_restore(struct virtio_device *dev)
goto err;
}
- /* If restore didn't do it, mark device DRIVER_OK ourselves. */
- if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
- virtio_device_ready(dev);
+ /* Finally, tell the device we're all set */
+ virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
virtio_config_enable(dev);
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index dfe26fa17e95..617a7f4f07a8 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -689,29 +689,34 @@ void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
}
EXPORT_SYMBOL(xen_free_ballooned_pages);
-#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
-static void __init balloon_add_region(unsigned long start_pfn,
- unsigned long pages)
+static void __init balloon_add_regions(void)
{
+#if defined(CONFIG_XEN_PV)
+ unsigned long start_pfn, pages;
unsigned long pfn, extra_pfn_end;
+ unsigned int i;
- /*
- * If the amount of usable memory has been limited (e.g., with
- * the 'mem' command line parameter), don't add pages beyond
- * this limit.
- */
- extra_pfn_end = min(max_pfn, start_pfn + pages);
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ pages = xen_extra_mem[i].n_pfns;
+ if (!pages)
+ continue;
- for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
- /* totalram_pages and totalhigh_pages do not
- include the boot-time balloon extension, so
- don't subtract from it. */
- balloon_append(pfn_to_page(pfn));
- }
+ start_pfn = xen_extra_mem[i].start_pfn;
- balloon_stats.total_pages += extra_pfn_end - start_pfn;
-}
+ /*
+ * If the amount of usable memory has been limited (e.g., with
+ * the 'mem' command line parameter), don't add pages beyond
+ * this limit.
+ */
+ extra_pfn_end = min(max_pfn, start_pfn + pages);
+
+ for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
+ balloon_append(pfn_to_page(pfn));
+
+ balloon_stats.total_pages += extra_pfn_end - start_pfn;
+ }
#endif
+}
static int __init balloon_init(void)
{
@@ -745,20 +750,7 @@ static int __init balloon_init(void)
register_sysctl_table(xen_root);
#endif
-#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
- {
- int i;
-
- /*
- * Initialize the balloon with pages from the extra memory
- * regions (see arch/x86/xen/setup.c).
- */
- for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
- if (xen_extra_mem[i].n_pfns)
- balloon_add_region(xen_extra_mem[i].start_pfn,
- xen_extra_mem[i].n_pfns);
- }
-#endif
+ balloon_add_regions();
task = kthread_run(balloon_thread, NULL, "xen-balloon");
if (IS_ERR(task)) {
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 4849f94372a4..55acb32842a3 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -178,9 +178,9 @@ static void __del_gref(struct gntalloc_gref *gref)
unsigned long addr;
if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
- uint8_t *tmp = kmap(gref->page);
+ uint8_t *tmp = kmap_local_page(gref->page);
tmp[gref->notify.pgoff] = 0;
- kunmap(gref->page);
+ kunmap_local(tmp);
}
if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(gref->notify.event);
diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
index a8b41057c382..a39f2d36dd9c 100644
--- a/drivers/xen/unpopulated-alloc.c
+++ b/drivers/xen/unpopulated-alloc.c
@@ -230,39 +230,6 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);
-#ifdef CONFIG_XEN_PV
-static int __init init(void)
-{
- unsigned int i;
-
- if (!xen_domain())
- return -ENODEV;
-
- if (!xen_pv_domain())
- return 0;
-
- /*
- * Initialize with pages from the extra memory regions (see
- * arch/x86/xen/setup.c).
- */
- for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
- unsigned int j;
-
- for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
- struct page *pg =
- pfn_to_page(xen_extra_mem[i].start_pfn + j);
-
- pg->zone_device_data = page_list;
- page_list = pg;
- list_count++;
- }
- }
-
- return 0;
-}
-subsys_initcall(init);
-#endif
-
static int __init unpopulated_init(void)
{
int ret;